[llvm-branch-commits] [llvm] 79cbb00 - [RISCV] Don't use tail agnostic policy on instructions where destination is tied to source

Craig Topper via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Tue Dec 29 10:43:31 PST 2020


Author: Craig Topper
Date: 2020-12-29T10:37:58-08:00
New Revision: 79cbb003c53009e5ca35b804bb7655dba97776e7

URL: https://github.com/llvm/llvm-project/commit/79cbb003c53009e5ca35b804bb7655dba97776e7
DIFF: https://github.com/llvm/llvm-project/commit/79cbb003c53009e5ca35b804bb7655dba97776e7.diff

LOG: [RISCV] Don't use tail agnostic policy on instructions where destination is tied to source

If the destination is tied to a source, then the user has some
control over the register used for input, and therefore over the
value of any tail elements. By using the tail agnostic policy we
take that option away from them.

It's not clear that the intrinsics are defined such that this
isn't supposed to work. And since tail undisturbed is a valid
implementation of tail agnostic, code relying on it wouldn't even
fail on all systems if we always used the agnostic policy.
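
To make the policy difference concrete, here is a toy model of the
two behaviors (plain C++, not LLVM code; the 8-element register,
vl=4, and the all-ones fill are illustrative assumptions, since an
agnostic tail may hold any value, including the old one):

    #include <array>
    #include <cstdio>

    // Toy model: a "register" holds 8 elements and vl = 4, so
    // elements 4..7 are the tail. Tail undisturbed (tu) keeps the
    // old destination values in the tail; tail agnostic (ta) lets
    // the implementation overwrite them (modeled here as all-ones).
    constexpr int VLEN = 8, VL = 4;

    std::array<int, VLEN> vadd(std::array<int, VLEN> dest, // tied source
                               const std::array<int, VLEN> &a,
                               const std::array<int, VLEN> &b,
                               bool tailAgnostic) {
      for (int i = 0; i < VL; ++i)
        dest[i] = a[i] + b[i];   // body elements are always written
      if (tailAgnostic)
        for (int i = VL; i < VLEN; ++i)
          dest[i] = -1;          // ta: tail value is unspecified
      // tu: tail elements 4..7 keep the caller's values in dest
      return dest;
    }

    int main() {
      std::array<int, VLEN> dest{10, 11, 12, 13, 14, 15, 16, 17};
      std::array<int, VLEN> a{1, 2, 3, 4}, b{5, 6, 7, 8};
      auto tu = vadd(dest, a, b, /*tailAgnostic=*/false);
      auto ta = vadd(dest, a, b, /*tailAgnostic=*/true);
      std::printf("tu tail: %d %d %d %d\n", tu[4], tu[5], tu[6], tu[7]);
      std::printf("ta tail: %d %d %d %d\n", ta[4], ta[5], ta[6], ta[7]);
    }

Note that leaving the tail untouched is itself a legal tail
agnostic implementation, which is why code that happened to rely
on the old values could work on some implementations and not
others.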

The vcompress intrinsic is defined to require tail undisturbed,
so at a minimum we need this change for that instruction, or we
need to redefine the intrinsic.

I've made an exception here for vmv.s.x/vfmv.s.f and the
reduction instructions, which only write to element 0 regardless
of the tail policy. This lets us keep the agnostic policy on
those, which should allow better removal of redundant vsetvli
instructions.
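
The resulting rule is a single condition; a standalone sketch
(pickTailAgnostic is a hypothetical helper name, but the tied-
operand check and the WritesElement0 flag mirror the patch below):

    // Default to tail agnostic; fall back to tail undisturbed only
    // when the user could observe the tail values through the tied
    // destination, i.e. the destination is tied to a source and the
    // instruction writes more than element 0.
    bool pickTailAgnostic(bool destTiedToSource, bool writesElement0) {
      return !(destTiedToSource && !writesElement0);
    }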

A further enhancement would be to check for an undef input and
keep the agnostic policy in that case, but we don't have good
test coverage for that yet.

Reviewed By: khchen

Differential Revision: https://reviews.llvm.org/D93878

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h
    llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vlxe-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vlxe-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index b643f76e1fb5..10a3889a8b9e 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -2114,7 +2114,7 @@ static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
 
 static MachineBasicBlock *addVSetVL(MachineInstr &MI, MachineBasicBlock *BB,
                                     int VLIndex, unsigned SEWIndex,
-                                    unsigned VLMul) {
+                                    unsigned VLMul, bool WritesElement0) {
   MachineFunction &MF = *BB->getParent();
   DebugLoc DL = MI.getDebugLoc();
   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
@@ -2141,9 +2141,19 @@ static MachineBasicBlock *addVSetVL(MachineInstr &MI, MachineBasicBlock *BB,
     MIB.addReg(RISCV::X0, RegState::Define | RegState::Dead)
         .addReg(RISCV::X0, RegState::Kill);
 
+  // Default to tail agnostic unless the destination is tied to a source. In
+  // that case the user would have some control over the tail values. The tail
+  // policy is also ignored on instructions that only update element 0 like
+  // vmv.s.x or reductions so use agnostic there to match the common case.
+  // FIXME: This is conservatively correct, but we might want to detect that
+  // the input is undefined.
+  bool TailAgnostic = true;
+  if (MI.isRegTiedToUseOperand(0) && !WritesElement0)
+    TailAgnostic = false;
+
   // For simplicity we reuse the vtype representation here.
   MIB.addImm(RISCVVType::encodeVTYPE(Multiplier, ElementWidth,
-                                     /*TailAgnostic*/ true,
+                                     /*TailAgnostic*/ TailAgnostic,
                                      /*MaskAgnostic*/ false));
 
   // Remove (now) redundant operands from pseudo
@@ -2164,9 +2174,10 @@ RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
           RISCVVPseudosTable::getPseudoInfo(MI.getOpcode())) {
     int VLIndex = RVV->getVLIndex();
     int SEWIndex = RVV->getSEWIndex();
+    bool WritesElement0 = RVV->writesElement0();
 
     assert(SEWIndex >= 0 && "SEWIndex must be >= 0");
-    return addVSetVL(MI, BB, VLIndex, SEWIndex, RVV->VLMul);
+    return addVSetVL(MI, BB, VLIndex, SEWIndex, RVV->VLMul, WritesElement0);
   }
 
   switch (MI.getOpcode()) {

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index b50109eecac0..e6e3846d7c9a 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -263,6 +263,7 @@ class RISCVVPseudo {
   bits<8> MergeOpIndex = InvalidIndex.V;
   bits<3> VLMul;
   bit HasDummyMask = 0;
+  bit WritesElement0 = 0;
 }
 
 // The actual table.
@@ -270,7 +271,7 @@ def RISCVVPseudosTable : GenericTable {
   let FilterClass = "RISCVVPseudo";
   let CppTypeName = "PseudoInfo";
   let Fields = [ "Pseudo", "BaseInstr", "VLIndex", "SEWIndex", "MergeOpIndex",
-                 "VLMul", "HasDummyMask" ];
+                 "VLMul", "HasDummyMask", "WritesElement0" ];
   let PrimaryKey = [ "Pseudo" ];
   let PrimaryKeyName = "getPseudoInfo";
 }
@@ -1159,8 +1160,10 @@ multiclass VPseudoBinaryM_VX_VI {
 }
 
 multiclass VPseudoReductionV_VS {
-  foreach m = MxList.m in
+  foreach m = MxList.m in {
+    let WritesElement0 = 1 in
     defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>;
+  }
 }
 
 //===----------------------------------------------------------------------===//
@@ -2477,7 +2480,7 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1,
       def PseudoVMV_X_S # "_" # m.MX: Pseudo<(outs GPR:$rd),
                                              (ins m.vrclass:$rs2, ixlenimm:$sew),
                                              []>, RISCVVPseudo;
-      let VLIndex = 3, SEWIndex = 4, BaseInstr = VMV_S_X,
+      let VLIndex = 3, SEWIndex = 4, BaseInstr = VMV_S_X, WritesElement0 = 1,
           Constraints = "$rd = $rs1" in
       def PseudoVMV_S_X # "_" # m.MX: Pseudo<(outs m.vrclass:$rd),
                                              (ins m.vrclass:$rs1, GPR:$rs2,
@@ -2502,7 +2505,7 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1,
                                                (ins m.vrclass:$rs2,
                                                     ixlenimm:$sew),
                                                []>, RISCVVPseudo;
-      let VLIndex = 3, SEWIndex = 4, BaseInstr = VFMV_S_F,
+      let VLIndex = 3, SEWIndex = 4, BaseInstr = VFMV_S_F, WritesElement0 = 1,
           Constraints = "$rd = $rs1" in
       def PseudoVFMV_S_F # "_" # m.MX : Pseudo<(outs m.vrclass:$rd),
                                                (ins m.vrclass:$rs1, FPR32:$rs2,

diff --git a/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h
index 4cd32b29b04d..df711f3c54db 100644
--- a/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h
+++ b/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h
@@ -378,6 +378,7 @@ struct PseudoInfo {
   uint8_t MergeOpIndex;
   uint8_t VLMul;
   bool HasDummyMask;
+  bool WritesElement0;
 
   int getVLIndex() const { return static_cast<int8_t>(VLIndex); }
 
@@ -386,6 +387,8 @@ struct PseudoInfo {
   int getMergeOpIndex() const { return static_cast<int8_t>(MergeOpIndex); }
 
   bool hasDummyMask() const { return HasDummyMask; }
+
+  bool writesElement0() const { return WritesElement0; }
 };
 
 using namespace RISCV;

diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll
index a100fc08b696..bd8046ff79b9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vaadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vaadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vaadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vaadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vaadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vaadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vaadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vaadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vaadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vaadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vaadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vaadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vaadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vaadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vaadd_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vaadd_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vaadd_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vaadd_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vaadd_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vaadd_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vaadd_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vaadd_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vaadd_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vaadd_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vaadd_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vaadd_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vaadd_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vaadd_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vaadd_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vaadd_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vaadd_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vaadd_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll
index 727adb8a4232..46d2d66de020 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vaadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vaadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vaadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vaadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vaadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vaadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vaadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vaadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vaadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vaadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vaadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vaadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vaadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vaadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vaadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vaadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vaadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vaadd_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vaadd_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vaadd_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vaadd_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vaadd_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vaadd_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vaadd_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vaadd_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vaadd_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vaadd_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vaadd_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vaadd_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vaadd_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vaadd_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vaadd_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vaadd_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vaadd_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vaadd_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vaadd_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vaadd_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vaadd_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vaadd_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vaadd_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll
index 7a240b49e89d..ad63b9eb4d16 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vaaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vaaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vaaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vaaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vaaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vaaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vaaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vaaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vaaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vaaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vaaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vaaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vaaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vaaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vaaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vaaddu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vaaddu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vaaddu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vaaddu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vaaddu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vaaddu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vaaddu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vaaddu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vaaddu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vaaddu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vaaddu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vaaddu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vaaddu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vaaddu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vaaddu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vaaddu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vaaddu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vaaddu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll
index a3d347ab0628..19820b390123 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vaaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vaaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vaaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vaaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vaaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vaaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vaaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vaaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vaaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vaaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vaaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vaaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vaaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vaaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vaaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vaaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vaaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vaaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vaaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vaaddu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vaaddu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vaaddu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vaaddu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vaaddu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vaaddu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vaaddu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vaaddu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vaaddu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vaaddu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vaaddu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vaaddu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vaaddu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vaaddu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vaaddu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vaaddu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vaaddu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vaaddu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vaaddu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vaaddu.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vaaddu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vaaddu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vaaddu.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vaaddu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vaaddu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vaaddu.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vaaddu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vaaddu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
index 756bba5b7786..f0c92973188b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1456,7 +1456,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1484,7 +1484,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1512,7 +1512,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1540,7 +1540,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1568,7 +1568,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1596,7 +1596,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1624,7 +1624,7 @@ entry:
 define <vscale x 64 x i8> @intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1652,7 +1652,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1680,7 +1680,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1708,7 +1708,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1736,7 +1736,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1764,7 +1764,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1792,7 +1792,7 @@ entry:
 define <vscale x 32 x i16> @intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1820,7 +1820,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1848,7 +1848,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1876,7 +1876,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1904,7 +1904,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1932,7 +1932,7 @@ entry:
 define <vscale x 16 x i32> @intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll
index 490ab42d4683..e658ec776ac4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vadd_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vadd_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vadd_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vadd_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1776,7 +1776,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1804,7 +1804,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1832,7 +1832,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1860,7 +1860,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1888,7 +1888,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1916,7 +1916,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1944,7 +1944,7 @@ entry:
 define <vscale x 64 x i8> @intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1972,7 +1972,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2000,7 +2000,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2028,7 +2028,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2056,7 +2056,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2084,7 +2084,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2112,7 +2112,7 @@ entry:
 define <vscale x 32 x i16> @intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2140,7 +2140,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2168,7 +2168,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2196,7 +2196,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2224,7 +2224,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2252,7 +2252,7 @@ entry:
 define <vscale x 16 x i32> @intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2280,7 +2280,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vadd_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2308,7 +2308,7 @@ entry:
 define <vscale x 2 x i64> @intrinsic_vadd_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2336,7 +2336,7 @@ entry:
 define <vscale x 4 x i64> @intrinsic_vadd_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2364,7 +2364,7 @@ entry:
 define <vscale x 8 x i64> @intrinsic_vadd_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll
index 0dd0a556e2a8..5cf872bd396a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1456,7 +1456,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1484,7 +1484,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1512,7 +1512,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1540,7 +1540,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1568,7 +1568,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1596,7 +1596,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1624,7 +1624,7 @@ entry:
 define <vscale x 64 x i8> @intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1652,7 +1652,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1680,7 +1680,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1708,7 +1708,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1736,7 +1736,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1764,7 +1764,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1792,7 +1792,7 @@ entry:
 define <vscale x 32 x i16> @intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1820,7 +1820,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1848,7 +1848,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1876,7 +1876,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1904,7 +1904,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1932,7 +1932,7 @@ entry:
 define <vscale x 16 x i32> @intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll
index e6ae2578f570..6303eb55ad5e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vand.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vand.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1776,7 +1776,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1804,7 +1804,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1832,7 +1832,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1860,7 +1860,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1888,7 +1888,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1916,7 +1916,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1944,7 +1944,7 @@ entry:
 define <vscale x 64 x i8> @intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1972,7 +1972,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2000,7 +2000,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2028,7 +2028,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2056,7 +2056,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2084,7 +2084,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2112,7 +2112,7 @@ entry:
 define <vscale x 32 x i16> @intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2140,7 +2140,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2168,7 +2168,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2196,7 +2196,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2224,7 +2224,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2252,7 +2252,7 @@ entry:
 define <vscale x 16 x i32> @intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2280,7 +2280,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vand_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2308,7 +2308,7 @@ entry:
 define <vscale x 2 x i64> @intrinsic_vand_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2336,7 +2336,7 @@ entry:
 define <vscale x 4 x i64> @intrinsic_vand_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2364,7 +2364,7 @@ entry:
 define <vscale x 8 x i64> @intrinsic_vand_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vand.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll
index d47b8664ddda..0933e255f5eb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vasub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vasub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vasub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vasub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vasub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vasub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vasub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vasub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vasub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vasub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vasub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vasub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vasub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vasub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vasub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vasub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vasub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vasub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vasub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vasub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vasub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vasub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vasub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vasub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vasub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vasub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vasub_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vasub_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vasub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vasub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vasub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vasub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vasub_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll
index c71df1577481..56190cbb6c11 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vasub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vasub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vasub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vasub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vasub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vasub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vasub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vasub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vasub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vasub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vasub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vasub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vasub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vasub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vasub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vasub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vasub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vasub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vasub_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vasub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vasub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vasub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vasub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vasub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vasub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vasub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vasub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vasub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vasub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vasub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vasub_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vasub_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vasub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vasub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vasub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vasub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vasub_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vasub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vasub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vasub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vasub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

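Every hunk in these files changes the same single thing: nothing in the IR, only the expected vsetvli, which goes from tail agnostic (ta) to tail undisturbed (tu) for the masked intrinsics that take a separate maskedoff vector (%0) ahead of their two sources. A minimal sketch of the pattern being tested, assuming a declaration matching the call sites above (the function and value names here are illustrative, not taken from the patch):

declare <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,  ; maskedoff, tied to the destination register
  <vscale x 1 x i8>,  ; op1
  <vscale x 1 x i8>,  ; op2
  <vscale x 1 x i1>,  ; mask
  i64)                ; vl

define <vscale x 1 x i8> @sketch_vasub_mask(<vscale x 1 x i8> %maskedoff, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %mask, i64 %vl) nounwind {
entry:
; Expected codegen after this patch: because the result is tied to
; %maskedoff, the vsetvli must request tail undisturbed:
;   vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
;   vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
  %r = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %maskedoff,
    <vscale x 1 x i8> %a,
    <vscale x 1 x i8> %b,
    <vscale x 1 x i1> %mask,
    i64 %vl)
  ret <vscale x 1 x i8> %r
}
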
diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll
index 521bba6c94e9..0644a359bec2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vasubu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vasubu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vasubu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vasubu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vasubu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vasubu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vasubu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vasubu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vasubu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vasubu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vasubu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vasubu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vasubu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vasubu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vasubu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vasubu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vasubu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vasubu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vasubu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vasubu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vasubu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vasubu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vasubu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vasubu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vasubu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vasubu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vasubu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vasubu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vasubu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vasubu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vasubu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vasubu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vasubu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll
index b74b4026b8df..b613e246a443 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vasubu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vasubu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vasubu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vasubu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vasubu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vasubu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vasubu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vasubu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vasubu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vasubu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vasubu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vasubu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vasubu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vasubu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vasubu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vasubu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vasubu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vasubu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vasubu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vasubu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vasubu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vasubu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vasubu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vasubu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vasubu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vasubu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vasubu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vasubu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vasubu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vasubu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vasubu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vasubu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vasubu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vasubu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vasubu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vasubu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vasubu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vasubu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vasubu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vasubu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vasubu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll
index ba0912cba280..624f588b3e23 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll
index f5869dea2d0d..fe029bcfc938 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll
index ecfbd3b254eb..f6cc2ecbf154 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll
index a09d88f5db79..dc0bb332422f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

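For reference, the pattern exercised by these masked vdivu tests can be
reduced to a standalone .ll file. The sketch below is illustrative, not a
file from the tree: the function name @masked_vdivu, the value names, and
the RUN line are assumptions (the rvv tests of this era were run with
-mattr=+experimental-v), while the intrinsic signature and the CHECK
patterns are taken verbatim from the hunks above. The first operand
carries the merge value that ends up tied to the destination register,
which is why the expected policy in the check is now tu rather than ta:

; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s

declare <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i1>,
  i64)

define <vscale x 4 x i32> @masked_vdivu(<vscale x 4 x i32> %merge, <vscale x 4 x i32> %src, i32 %rs1, <vscale x 4 x i1> %mask, i64 %vl) nounwind {
entry:
; CHECK-LABEL: masked_vdivu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
  %a = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.i32(
    <vscale x 4 x i32> %merge,
    <vscale x 4 x i32> %src,
    i32 %rs1,
    <vscale x 4 x i1> %mask,
    i64 %vl)
  ret <vscale x 4 x i32> %a
}
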
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll
index bb1ee0745400..a9d4dfcabcc9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll
@@ -29,7 +29,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -69,7 +69,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -109,7 +109,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -149,7 +149,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -189,7 +189,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -229,7 +229,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16(
 define <vscale x 32 x half> @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -269,7 +269,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -309,7 +309,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -349,7 +349,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -389,7 +389,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -429,7 +429,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32(
 define <vscale x 16 x float> @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -469,7 +469,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16(
 define <vscale x 1 x half> @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -509,7 +509,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16(
 define <vscale x 2 x half> @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -549,7 +549,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
 define <vscale x 4 x half> @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -589,7 +589,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
 define <vscale x 8 x half> @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -629,7 +629,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
 define <vscale x 16 x half> @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -669,7 +669,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
 define <vscale x 32 x half> @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -709,7 +709,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
 define <vscale x 1 x float> @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -749,7 +749,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
 define <vscale x 2 x float> @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -789,7 +789,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
 define <vscale x 4 x float> @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -829,7 +829,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
 define <vscale x 8 x float> @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -869,7 +869,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
 define <vscale x 16 x float> @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,

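The vector-vector FP form on rv32 reduces the same way. Again a sketch
with invented function and value names; the signature and CHECK lines are
copied from the hunks above, and note the VL operand is i32 here,
matching the rv32 test files:

; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s

declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  i32)

define <vscale x 2 x float> @masked_vfadd_vv(<vscale x 2 x float> %merge, <vscale x 2 x float> %a, <vscale x 2 x float> %b, <vscale x 2 x i1> %mask, i32 %vl) nounwind {
entry:
; CHECK-LABEL: masked_vfadd_vv
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
  %r = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
    <vscale x 2 x float> %merge,
    <vscale x 2 x float> %a,
    <vscale x 2 x float> %b,
    <vscale x 2 x i1> %mask,
    i32 %vl)
  ret <vscale x 2 x float> %r
}
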
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll
index f14906c430e1..ada32e3ca528 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll
@@ -29,7 +29,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -69,7 +69,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -109,7 +109,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -149,7 +149,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -189,7 +189,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -229,7 +229,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16(
 define <vscale x 32 x half> @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -269,7 +269,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -309,7 +309,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -349,7 +349,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -389,7 +389,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -429,7 +429,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32(
 define <vscale x 16 x float> @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -469,7 +469,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -509,7 +509,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64(
 define <vscale x 2 x double> @intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -549,7 +549,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64(
 define <vscale x 4 x double> @intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -589,7 +589,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64(
 define <vscale x 8 x double> @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -629,7 +629,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16(
 define <vscale x 1 x half> @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -669,7 +669,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16(
 define <vscale x 2 x half> @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -709,7 +709,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
 define <vscale x 4 x half> @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -749,7 +749,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
 define <vscale x 8 x half> @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -789,7 +789,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
 define <vscale x 16 x half> @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -829,7 +829,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
 define <vscale x 32 x half> @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -869,7 +869,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
 define <vscale x 1 x float> @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -909,7 +909,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
 define <vscale x 2 x float> @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -949,7 +949,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
 define <vscale x 4 x float> @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -989,7 +989,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
 define <vscale x 8 x float> @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1029,7 +1029,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
 define <vscale x 16 x float> @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1069,7 +1069,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
 define <vscale x 1 x double> @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1109,7 +1109,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64(
 define <vscale x 2 x double> @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1149,7 +1149,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64(
 define <vscale x 4 x double> @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1189,7 +1189,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64(
 define <vscale x 8 x double> @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

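And the vector-scalar FP form, where the scalar operand arrives in an FPR
(hence the ft register class in the checks). A sketch under the same
assumptions as above; a real run would additionally need the D extension
enabled alongside experimental-v for the f64 element type:

declare <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  <vscale x 8 x i1>,
  i64)

define <vscale x 8 x double> @masked_vfadd_vf(<vscale x 8 x double> %merge, <vscale x 8 x double> %src, double %rs1, <vscale x 8 x i1> %mask, i64 %vl) nounwind {
entry:
; CHECK-LABEL: masked_vfadd_vf
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
  %r = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64(
    <vscale x 8 x double> %merge,
    <vscale x 8 x double> %src,
    double %rs1,
    <vscale x 8 x i1> %mask,
    i64 %vl)
  ret <vscale x 8 x double> %r
}
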
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll
index 3187dd573cba..c7efd5a74a85 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16(
 define <vscale x 32 x half> @intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32(
 define <vscale x 16 x float> @intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.f16(
 define <vscale x 1 x half> @intrinsic_vfdiv_mask_vf_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16.f16(
 define <vscale x 2 x half> @intrinsic_vfdiv_mask_vf_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16.f16(
 define <vscale x 4 x half> @intrinsic_vfdiv_mask_vf_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16.f16(
 define <vscale x 8 x half> @intrinsic_vfdiv_mask_vf_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -628,7 +628,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16.f16(
 define <vscale x 16 x half> @intrinsic_vfdiv_mask_vf_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -668,7 +668,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.f16(
 define <vscale x 32 x half> @intrinsic_vfdiv_mask_vf_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -708,7 +708,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.f32(
 define <vscale x 1 x float> @intrinsic_vfdiv_mask_vf_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -748,7 +748,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.f32(
 define <vscale x 2 x float> @intrinsic_vfdiv_mask_vf_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -788,7 +788,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.f32(
 define <vscale x 4 x float> @intrinsic_vfdiv_mask_vf_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -828,7 +828,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.f32(
 define <vscale x 8 x float> @intrinsic_vfdiv_mask_vf_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -868,7 +868,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.f32(
 define <vscale x 16 x float> @intrinsic_vfdiv_mask_vf_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll
index 438098d3201d..fd2e6bb8f773 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16(
 define <vscale x 32 x half> @intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32(
 define <vscale x 16 x float> @intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64(
 define <vscale x 2 x double> @intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64(
 define <vscale x 4 x double> @intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64(
 define <vscale x 8 x double> @intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -628,7 +628,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.f16(
 define <vscale x 1 x half> @intrinsic_vfdiv_mask_vf_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -668,7 +668,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16.f16(
 define <vscale x 2 x half> @intrinsic_vfdiv_mask_vf_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -708,7 +708,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16.f16(
 define <vscale x 4 x half> @intrinsic_vfdiv_mask_vf_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -748,7 +748,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16.f16(
 define <vscale x 8 x half> @intrinsic_vfdiv_mask_vf_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -788,7 +788,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16.f16(
 define <vscale x 16 x half> @intrinsic_vfdiv_mask_vf_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -828,7 +828,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.f16(
 define <vscale x 32 x half> @intrinsic_vfdiv_mask_vf_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -868,7 +868,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.f32(
 define <vscale x 1 x float> @intrinsic_vfdiv_mask_vf_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -908,7 +908,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.f32(
 define <vscale x 2 x float> @intrinsic_vfdiv_mask_vf_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -948,7 +948,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.f32(
 define <vscale x 4 x float> @intrinsic_vfdiv_mask_vf_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -988,7 +988,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.f32(
 define <vscale x 8 x float> @intrinsic_vfdiv_mask_vf_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.f32(
 define <vscale x 16 x float> @intrinsic_vfdiv_mask_vf_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.f64(
 define <vscale x 1 x double> @intrinsic_vfdiv_mask_vf_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.f64(
 define <vscale x 2 x double> @intrinsic_vfdiv_mask_vf_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.f64(
 define <vscale x 4 x double> @intrinsic_vfdiv_mask_vf_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.f64(
 define <vscale x 8 x double> @intrinsic_vfdiv_mask_vf_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

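[Editor's note: a minimal IR sketch, not part of the patch, of the masked intrinsic pattern the vfdiv tests above exercise; the operand roles and the exact signature are inferred from the declarations in the diff. The first vector operand is the merge value whose register the result is tied to: with mask-undisturbed (mu) the inactive lanes were already taken from it, and with this change the tail lanes are as well, which is why the vsetvli checks now expect tu rather than ta.

declare <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16(
  <vscale x 1 x half>,   ; merge: tied to the destination register
  <vscale x 1 x half>,   ; op1
  <vscale x 1 x half>,   ; op2
  <vscale x 1 x i1>,     ; mask (v0)
  i64)                   ; vl

define <vscale x 1 x half> @sketch_vfdiv_mask(<vscale x 1 x half> %merge, <vscale x 1 x half> %a, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i64 %vl) nounwind {
entry:
  ; active lanes below %vl compute %a / %b; inactive lanes and, with
  ; tail undisturbed, tail lanes keep the values from %merge
  %r = call <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16(
    <vscale x 1 x half> %merge,
    <vscale x 1 x half> %a,
    <vscale x 1 x half> %b,
    <vscale x 1 x i1> %m,
    i64 %vl)
  ret <vscale x 1 x half> %r
}]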
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll
index 44f0ecb2f0d5..c1f825444177 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -221,7 +221,7 @@ define <vscale x 16 x half>  @intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -244,7 +244,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -267,7 +267,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -290,7 +290,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -313,7 +313,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -336,7 +336,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -359,7 +359,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -384,7 +384,7 @@ define <vscale x 8 x float>  @intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -409,7 +409,7 @@ define <vscale x 8 x float>  @intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -433,7 +433,7 @@ define <vscale x 1 x half>  @intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16(<vscale x 1
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -457,7 +457,7 @@ define <vscale x 1 x half> @intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -481,7 +481,7 @@ define <vscale x 2 x half>  @intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16(<vscale x 2
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -505,7 +505,7 @@ define <vscale x 2 x half> @intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -529,7 +529,7 @@ define <vscale x 4 x half>  @intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16(<vscale x 4
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -553,7 +553,7 @@ define <vscale x 4 x half> @intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -577,7 +577,7 @@ define <vscale x 8 x half>  @intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16(<vscale x 8
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -601,7 +601,7 @@ define <vscale x 8 x half> @intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -625,7 +625,7 @@ define <vscale x 16 x half>  @intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -649,7 +649,7 @@ define <vscale x 16 x half> @intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16(<vsc
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -673,7 +673,7 @@ define <vscale x 1 x float>  @intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -697,7 +697,7 @@ define <vscale x 1 x float> @intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -721,7 +721,7 @@ define <vscale x 2 x float>  @intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -745,7 +745,7 @@ define <vscale x 2 x float> @intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -769,7 +769,7 @@ define <vscale x 4 x float>  @intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -793,7 +793,7 @@ define <vscale x 4 x float> @intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -817,7 +817,7 @@ define <vscale x 8 x float>  @intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -841,7 +841,7 @@ define <vscale x 8 x float> @intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll
index 6b83445292a3..d9dd331375b5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -221,7 +221,7 @@ define <vscale x 16 x half>  @intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -244,7 +244,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -267,7 +267,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -290,7 +290,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -313,7 +313,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -336,7 +336,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -359,7 +359,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -384,7 +384,7 @@ define <vscale x 8 x float>  @intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -409,7 +409,7 @@ define <vscale x 8 x float>  @intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -432,7 +432,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.nxv1f64(
 define <vscale x 1 x double>  @intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -455,7 +455,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64(
 define <vscale x 1 x double>  @intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -478,7 +478,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.nxv2f64(
 define <vscale x 2 x double>  @intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -501,7 +501,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64(
 define <vscale x 2 x double>  @intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -526,7 +526,7 @@ define <vscale x 4 x double>  @intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -551,7 +551,7 @@ define <vscale x 4 x double>  @intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -575,7 +575,7 @@ define <vscale x 1 x half>  @intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16(<vscale x 1
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -599,7 +599,7 @@ define <vscale x 1 x half> @intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -623,7 +623,7 @@ define <vscale x 2 x half>  @intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16(<vscale x 2
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -647,7 +647,7 @@ define <vscale x 2 x half> @intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -671,7 +671,7 @@ define <vscale x 4 x half>  @intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16(<vscale x 4
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -695,7 +695,7 @@ define <vscale x 4 x half> @intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -719,7 +719,7 @@ define <vscale x 8 x half>  @intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16(<vscale x 8
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -743,7 +743,7 @@ define <vscale x 8 x half> @intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -767,7 +767,7 @@ define <vscale x 16 x half>  @intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -791,7 +791,7 @@ define <vscale x 16 x half> @intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16(<vsc
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -815,7 +815,7 @@ define <vscale x 1 x float>  @intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -839,7 +839,7 @@ define <vscale x 1 x float> @intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -863,7 +863,7 @@ define <vscale x 2 x float>  @intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -887,7 +887,7 @@ define <vscale x 2 x float> @intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -911,7 +911,7 @@ define <vscale x 4 x float>  @intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -935,7 +935,7 @@ define <vscale x 4 x float> @intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -959,7 +959,7 @@ define <vscale x 8 x float>  @intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -983,7 +983,7 @@ define <vscale x 8 x float> @intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1007,7 +1007,7 @@ define <vscale x 1 x double>  @intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64(<vscale x
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1031,7 +1031,7 @@ define <vscale x 1 x double> @intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64(<vsca
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1055,7 +1055,7 @@ define <vscale x 2 x double>  @intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64(<vscale x
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1079,7 +1079,7 @@ define <vscale x 2 x double> @intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64(<vsca
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1103,7 +1103,7 @@ define <vscale x 4 x double>  @intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64(<vscale x
 ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1127,7 +1127,7 @@ define <vscale x 4 x double> @intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64(<vsca
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

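[Editor's note: the vfmacc diffs above and the vfmadd diffs below change for the same reason but without needing a merge operand: a fused multiply-add reads its destination as the accumulator even in the unmasked form, so the destination is always tied to a source and the lowering now requests tu. A minimal sketch, again not part of the patch, with the signature inferred from the declarations in the diff:

declare <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.nxv1f16(
  <vscale x 1 x half>,   ; accumulator: tied to the destination register
  <vscale x 1 x half>,   ; multiplier
  <vscale x 1 x half>,   ; multiplicand
  i64)                   ; vl

define <vscale x 1 x half> @sketch_vfmacc(<vscale x 1 x half> %acc, <vscale x 1 x half> %a, <vscale x 1 x half> %b, i64 %vl) nounwind {
entry:
  ; lanes below %vl compute %acc + (%a * %b); with tail undisturbed,
  ; tail lanes keep the values from %acc
  %r = call <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.nxv1f16(
    <vscale x 1 x half> %acc,
    <vscale x 1 x half> %a,
    <vscale x 1 x half> %b,
    i64 %vl)
  ret <vscale x 1 x half> %r
}]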
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll
index 344a21bb0895..5111bb181c21 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -221,7 +221,7 @@ define <vscale x 16 x half>  @intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -244,7 +244,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -267,7 +267,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -290,7 +290,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -313,7 +313,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -336,7 +336,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -359,7 +359,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -384,7 +384,7 @@ define <vscale x 8 x float>  @intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -409,7 +409,7 @@ define <vscale x 8 x float>  @intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -433,7 +433,7 @@ define <vscale x 1 x half>  @intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16(<vscale x 1
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -457,7 +457,7 @@ define <vscale x 1 x half> @intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -481,7 +481,7 @@ define <vscale x 2 x half>  @intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16(<vscale x 2
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -505,7 +505,7 @@ define <vscale x 2 x half> @intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -529,7 +529,7 @@ define <vscale x 4 x half>  @intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16(<vscale x 4
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -553,7 +553,7 @@ define <vscale x 4 x half> @intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -577,7 +577,7 @@ define <vscale x 8 x half>  @intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16(<vscale x 8
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -601,7 +601,7 @@ define <vscale x 8 x half> @intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -625,7 +625,7 @@ define <vscale x 16 x half>  @intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -649,7 +649,7 @@ define <vscale x 16 x half> @intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16(<vsc
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -673,7 +673,7 @@ define <vscale x 1 x float>  @intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -697,7 +697,7 @@ define <vscale x 1 x float> @intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -721,7 +721,7 @@ define <vscale x 2 x float>  @intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -745,7 +745,7 @@ define <vscale x 2 x float> @intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -769,7 +769,7 @@ define <vscale x 4 x float>  @intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -793,7 +793,7 @@ define <vscale x 4 x float> @intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -817,7 +817,7 @@ define <vscale x 8 x float>  @intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -841,7 +841,7 @@ define <vscale x 8 x float> @intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll
index 993f535777e0..cbcced115d2f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -221,7 +221,7 @@ define <vscale x 16 x half>  @intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -244,7 +244,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -267,7 +267,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -290,7 +290,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -313,7 +313,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -336,7 +336,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -359,7 +359,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -384,7 +384,7 @@ define <vscale x 8 x float>  @intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -409,7 +409,7 @@ define <vscale x 8 x float>  @intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -432,7 +432,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.nxv1f64(
 define <vscale x 1 x double>  @intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -455,7 +455,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64(
 define <vscale x 1 x double>  @intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -478,7 +478,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.nxv2f64(
 define <vscale x 2 x double>  @intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -501,7 +501,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64(
 define <vscale x 2 x double>  @intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -526,7 +526,7 @@ define <vscale x 4 x double>  @intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -551,7 +551,7 @@ define <vscale x 4 x double>  @intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -575,7 +575,7 @@ define <vscale x 1 x half>  @intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16(<vscale x 1
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -599,7 +599,7 @@ define <vscale x 1 x half> @intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -623,7 +623,7 @@ define <vscale x 2 x half>  @intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16(<vscale x 2
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -647,7 +647,7 @@ define <vscale x 2 x half> @intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -671,7 +671,7 @@ define <vscale x 4 x half>  @intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16(<vscale x 4
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -695,7 +695,7 @@ define <vscale x 4 x half> @intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -719,7 +719,7 @@ define <vscale x 8 x half>  @intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16(<vscale x 8
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -743,7 +743,7 @@ define <vscale x 8 x half> @intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -767,7 +767,7 @@ define <vscale x 16 x half>  @intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -791,7 +791,7 @@ define <vscale x 16 x half> @intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16(<vsc
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -815,7 +815,7 @@ define <vscale x 1 x float>  @intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -839,7 +839,7 @@ define <vscale x 1 x float> @intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -863,7 +863,7 @@ define <vscale x 2 x float>  @intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -887,7 +887,7 @@ define <vscale x 2 x float> @intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -911,7 +911,7 @@ define <vscale x 4 x float>  @intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -935,7 +935,7 @@ define <vscale x 4 x float> @intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -959,7 +959,7 @@ define <vscale x 8 x float>  @intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -983,7 +983,7 @@ define <vscale x 8 x float> @intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1007,7 +1007,7 @@ define <vscale x 1 x double>  @intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64(<vscale x
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1031,7 +1031,7 @@ define <vscale x 1 x double> @intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64(<vsca
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1055,7 +1055,7 @@ define <vscale x 2 x double>  @intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64(<vscale x
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1079,7 +1079,7 @@ define <vscale x 2 x double> @intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64(<vsca
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1103,7 +1103,7 @@ define <vscale x 4 x double>  @intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64(<vscale x
 ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1127,7 +1127,7 @@ define <vscale x 4 x double> @intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64(<vsca
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

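For context on the mechanical ta -> tu updates in the vfmadd diffs above: vfmadd
writes its result into the register that also carries its first source operand,
so the caller controls (and can observe) the tail elements of that register, and
the vsetvli must request tail undisturbed. A minimal standalone sketch in the
style of these tests (the function name @vfmadd_tu_sketch is illustrative; the
intrinsic signature is the one declared in the files being updated):

  declare <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16(
    <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64)

  define <vscale x 1 x half> @vfmadd_tu_sketch(<vscale x 1 x half> %acc, <vscale x 1 x half> %x, <vscale x 1 x half> %y, i64 %vl) nounwind {
  entry:
    ; %acc is tied to the destination, so elements past %vl must be preserved.
    ; Expected lowering after this patch:
    ;   vsetvli a0, a0, e16,mf4,tu,mu
    ;   vfmadd.vv v16, v17, v18
    %r = call <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16(
      <vscale x 1 x half> %acc,
      <vscale x 1 x half> %x,
      <vscale x 1 x half> %y,
      i64 %vl)
    ret <vscale x 1 x half> %r
  }
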
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll
index 5c7b025f4f9c..55a36db67232 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16(
 define <vscale x 32 x half> @intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32(
 define <vscale x 16 x float> @intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.f16(
 define <vscale x 1 x half> @intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.f16(
 define <vscale x 2 x half> @intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.f16(
 define <vscale x 4 x half> @intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.f16(
 define <vscale x 8 x half> @intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -628,7 +628,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.f16(
 define <vscale x 16 x half> @intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -668,7 +668,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.f16(
 define <vscale x 32 x half> @intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -708,7 +708,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.f32(
 define <vscale x 1 x float> @intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -748,7 +748,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.f32(
 define <vscale x 2 x float> @intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -788,7 +788,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.f32(
 define <vscale x 4 x float> @intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -828,7 +828,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.f32(
 define <vscale x 8 x float> @intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -868,7 +868,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.f32(
 define <vscale x 16 x float> @intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,

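The masked vfmax updates above (and in the rv64 file that follows) rest on the
same reasoning: the first operand of the .mask intrinsics is the merge
(maskedoff) value tied to the destination, so under tu,mu both the tail
elements and the masked-off elements are taken from the caller-provided
register. A reduced sketch, assuming only the declarations already present in
these files (the name @vfmax_mask_tu_sketch is illustrative):

  declare <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>,
    <vscale x 1 x i1>, i32)

  define <vscale x 1 x half> @vfmax_mask_tu_sketch(<vscale x 1 x half> %maskedoff, <vscale x 1 x half> %a, <vscale x 1 x half> %b, <vscale x 1 x i1> %mask, i32 %vl) nounwind {
  entry:
    ; Expected lowering after this patch (CHECK-style patterns as in the file):
    ;   vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
    ;   vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
    %r = call <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16(
      <vscale x 1 x half> %maskedoff,
      <vscale x 1 x half> %a,
      <vscale x 1 x half> %b,
      <vscale x 1 x i1> %mask,
      i32 %vl)
    ret <vscale x 1 x half> %r
  }
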
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll
index c24eccdb84a8..b77f6cae2545 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16(
 define <vscale x 32 x half> @intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32(
 define <vscale x 16 x float> @intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64(
 define <vscale x 2 x double> @intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64(
 define <vscale x 4 x double> @intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64(
 define <vscale x 8 x double> @intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -628,7 +628,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.f16(
 define <vscale x 1 x half> @intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -668,7 +668,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.f16(
 define <vscale x 2 x half> @intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -708,7 +708,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.f16(
 define <vscale x 4 x half> @intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -748,7 +748,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.f16(
 define <vscale x 8 x half> @intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -788,7 +788,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.f16(
 define <vscale x 16 x half> @intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -828,7 +828,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.f16(
 define <vscale x 32 x half> @intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -868,7 +868,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.f32(
 define <vscale x 1 x float> @intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -908,7 +908,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.f32(
 define <vscale x 2 x float> @intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -948,7 +948,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.f32(
 define <vscale x 4 x float> @intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -988,7 +988,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.f32(
 define <vscale x 8 x float> @intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.f32(
 define <vscale x 16 x float> @intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.f64(
 define <vscale x 1 x double> @intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.f64(
 define <vscale x 2 x double> @intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.f64(
 define <vscale x 4 x double> @intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.f64(
 define <vscale x 8 x double> @intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll
index ea6f019ebd61..3a264a8a41cb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16(
 define <vscale x 32 x half> @intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32(
 define <vscale x 16 x float> @intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.f16(
 define <vscale x 1 x half> @intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.f16(
 define <vscale x 2 x half> @intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.f16(
 define <vscale x 4 x half> @intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.f16(
 define <vscale x 8 x half> @intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -628,7 +628,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.f16(
 define <vscale x 16 x half> @intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -668,7 +668,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.f16(
 define <vscale x 32 x half> @intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -708,7 +708,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.f32(
 define <vscale x 1 x float> @intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -748,7 +748,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.f32(
 define <vscale x 2 x float> @intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -788,7 +788,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.f32(
 define <vscale x 4 x float> @intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -828,7 +828,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.f32(
 define <vscale x 8 x float> @intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -868,7 +868,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.f32(
 define <vscale x 16 x float> @intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll
index 815badcfc75a..f5f21b22d161 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16(
 define <vscale x 32 x half> @intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32(
 define <vscale x 16 x float> @intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64(
 define <vscale x 2 x double> @intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64(
 define <vscale x 4 x double> @intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64(
 define <vscale x 8 x double> @intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -628,7 +628,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.f16(
 define <vscale x 1 x half> @intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -668,7 +668,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.f16(
 define <vscale x 2 x half> @intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -708,7 +708,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.f16(
 define <vscale x 4 x half> @intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -748,7 +748,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.f16(
 define <vscale x 8 x half> @intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -788,7 +788,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.f16(
 define <vscale x 16 x half> @intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -828,7 +828,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.f16(
 define <vscale x 32 x half> @intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -868,7 +868,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.f32(
 define <vscale x 1 x float> @intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -908,7 +908,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.f32(
 define <vscale x 2 x float> @intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -948,7 +948,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.f32(
 define <vscale x 4 x float> @intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -988,7 +988,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.f32(
 define <vscale x 8 x float> @intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.f32(
 define <vscale x 16 x float> @intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.f64(
 define <vscale x 1 x double> @intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.f64(
 define <vscale x 2 x double> @intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.f64(
 define <vscale x 4 x double> @intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.f64(
 define <vscale x 8 x double> @intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll
index d86f9462c6f2..fd22ce05dcba 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -221,7 +221,7 @@ define <vscale x 16 x half>  @intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -244,7 +244,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -267,7 +267,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -290,7 +290,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -313,7 +313,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -336,7 +336,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -359,7 +359,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -384,7 +384,7 @@ define <vscale x 8 x float>  @intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -409,7 +409,7 @@ define <vscale x 8 x float>  @intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -433,7 +433,7 @@ define <vscale x 1 x half>  @intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16(<vscale x 1
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -457,7 +457,7 @@ define <vscale x 1 x half> @intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -481,7 +481,7 @@ define <vscale x 2 x half>  @intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16(<vscale x 2
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -505,7 +505,7 @@ define <vscale x 2 x half> @intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -529,7 +529,7 @@ define <vscale x 4 x half>  @intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16(<vscale x 4
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -553,7 +553,7 @@ define <vscale x 4 x half> @intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -577,7 +577,7 @@ define <vscale x 8 x half>  @intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16(<vscale x 8
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -601,7 +601,7 @@ define <vscale x 8 x half> @intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -625,7 +625,7 @@ define <vscale x 16 x half>  @intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -649,7 +649,7 @@ define <vscale x 16 x half> @intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16(<vsc
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -673,7 +673,7 @@ define <vscale x 1 x float>  @intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -697,7 +697,7 @@ define <vscale x 1 x float> @intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -721,7 +721,7 @@ define <vscale x 2 x float>  @intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -745,7 +745,7 @@ define <vscale x 2 x float> @intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -769,7 +769,7 @@ define <vscale x 4 x float>  @intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -793,7 +793,7 @@ define <vscale x 4 x float> @intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -817,7 +817,7 @@ define <vscale x 8 x float>  @intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -841,7 +841,7 @@ define <vscale x 8 x float> @intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll
index 03364aba8430..65249cf94deb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -221,7 +221,7 @@ define <vscale x 16 x half>  @intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -244,7 +244,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -267,7 +267,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -290,7 +290,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -313,7 +313,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -336,7 +336,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -359,7 +359,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -384,7 +384,7 @@ define <vscale x 8 x float>  @intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -409,7 +409,7 @@ define <vscale x 8 x float>  @intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -432,7 +432,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.nxv1f64(
 define <vscale x 1 x double>  @intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -455,7 +455,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64(
 define <vscale x 1 x double>  @intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -478,7 +478,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.nxv2f64(
 define <vscale x 2 x double>  @intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -501,7 +501,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64(
 define <vscale x 2 x double>  @intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -526,7 +526,7 @@ define <vscale x 4 x double>  @intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -551,7 +551,7 @@ define <vscale x 4 x double>  @intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -575,7 +575,7 @@ define <vscale x 1 x half>  @intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16(<vscale x 1
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -599,7 +599,7 @@ define <vscale x 1 x half> @intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -623,7 +623,7 @@ define <vscale x 2 x half>  @intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16(<vscale x 2
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -647,7 +647,7 @@ define <vscale x 2 x half> @intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -671,7 +671,7 @@ define <vscale x 4 x half>  @intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16(<vscale x 4
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -695,7 +695,7 @@ define <vscale x 4 x half> @intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -719,7 +719,7 @@ define <vscale x 8 x half>  @intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16(<vscale x 8
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -743,7 +743,7 @@ define <vscale x 8 x half> @intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -767,7 +767,7 @@ define <vscale x 16 x half>  @intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -791,7 +791,7 @@ define <vscale x 16 x half> @intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16(<vsc
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -815,7 +815,7 @@ define <vscale x 1 x float>  @intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -839,7 +839,7 @@ define <vscale x 1 x float> @intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -863,7 +863,7 @@ define <vscale x 2 x float>  @intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -887,7 +887,7 @@ define <vscale x 2 x float> @intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -911,7 +911,7 @@ define <vscale x 4 x float>  @intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -935,7 +935,7 @@ define <vscale x 4 x float> @intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -959,7 +959,7 @@ define <vscale x 8 x float>  @intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -983,7 +983,7 @@ define <vscale x 8 x float> @intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1007,7 +1007,7 @@ define <vscale x 1 x double>  @intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64(<vscale x
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1031,7 +1031,7 @@ define <vscale x 1 x double> @intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64(<vsca
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1055,7 +1055,7 @@ define <vscale x 2 x double>  @intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64(<vscale x
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1079,7 +1079,7 @@ define <vscale x 2 x double> @intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64(<vsca
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1103,7 +1103,7 @@ define <vscale x 4 x double>  @intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64(<vscale x
 ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1127,7 +1127,7 @@ define <vscale x 4 x double> @intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64(<vsca
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll
index 1d8cb060e322..0bd2f2f6925a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -221,7 +221,7 @@ define <vscale x 16 x half>  @intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -244,7 +244,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -267,7 +267,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -290,7 +290,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -313,7 +313,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -336,7 +336,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -359,7 +359,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -384,7 +384,7 @@ define <vscale x 8 x float>  @intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -409,7 +409,7 @@ define <vscale x 8 x float>  @intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -433,7 +433,7 @@ define <vscale x 1 x half>  @intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16(<vscale x 1
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -457,7 +457,7 @@ define <vscale x 1 x half> @intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -481,7 +481,7 @@ define <vscale x 2 x half>  @intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16(<vscale x 2
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -505,7 +505,7 @@ define <vscale x 2 x half> @intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -529,7 +529,7 @@ define <vscale x 4 x half>  @intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16(<vscale x 4
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -553,7 +553,7 @@ define <vscale x 4 x half> @intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -577,7 +577,7 @@ define <vscale x 8 x half>  @intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16(<vscale x 8
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -601,7 +601,7 @@ define <vscale x 8 x half> @intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -625,7 +625,7 @@ define <vscale x 16 x half>  @intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -649,7 +649,7 @@ define <vscale x 16 x half> @intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16(<vsc
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -673,7 +673,7 @@ define <vscale x 1 x float>  @intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -697,7 +697,7 @@ define <vscale x 1 x float> @intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -721,7 +721,7 @@ define <vscale x 2 x float>  @intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -745,7 +745,7 @@ define <vscale x 2 x float> @intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -769,7 +769,7 @@ define <vscale x 4 x float>  @intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -793,7 +793,7 @@ define <vscale x 4 x float> @intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -817,7 +817,7 @@ define <vscale x 8 x float>  @intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -841,7 +841,7 @@ define <vscale x 8 x float> @intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll
index 347731829812..4188198f2c94 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -221,7 +221,7 @@ define <vscale x 16 x half>  @intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -244,7 +244,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -267,7 +267,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -290,7 +290,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -313,7 +313,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -336,7 +336,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -359,7 +359,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -384,7 +384,7 @@ define <vscale x 8 x float>  @intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -409,7 +409,7 @@ define <vscale x 8 x float>  @intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -432,7 +432,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.nxv1f64(
 define <vscale x 1 x double>  @intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -455,7 +455,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64(
 define <vscale x 1 x double>  @intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -478,7 +478,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.nxv2f64(
 define <vscale x 2 x double>  @intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -501,7 +501,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64(
 define <vscale x 2 x double>  @intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -526,7 +526,7 @@ define <vscale x 4 x double>  @intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -551,7 +551,7 @@ define <vscale x 4 x double>  @intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -575,7 +575,7 @@ define <vscale x 1 x half>  @intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16(<vscale x 1
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -599,7 +599,7 @@ define <vscale x 1 x half> @intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -623,7 +623,7 @@ define <vscale x 2 x half>  @intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16(<vscale x 2
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -647,7 +647,7 @@ define <vscale x 2 x half> @intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -671,7 +671,7 @@ define <vscale x 4 x half>  @intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16(<vscale x 4
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -695,7 +695,7 @@ define <vscale x 4 x half> @intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -719,7 +719,7 @@ define <vscale x 8 x half>  @intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16(<vscale x 8
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -743,7 +743,7 @@ define <vscale x 8 x half> @intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -767,7 +767,7 @@ define <vscale x 16 x half>  @intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -791,7 +791,7 @@ define <vscale x 16 x half> @intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16(<vsc
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -815,7 +815,7 @@ define <vscale x 1 x float>  @intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -839,7 +839,7 @@ define <vscale x 1 x float> @intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -863,7 +863,7 @@ define <vscale x 2 x float>  @intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -887,7 +887,7 @@ define <vscale x 2 x float> @intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -911,7 +911,7 @@ define <vscale x 4 x float>  @intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -935,7 +935,7 @@ define <vscale x 4 x float> @intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -959,7 +959,7 @@ define <vscale x 8 x float>  @intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -983,7 +983,7 @@ define <vscale x 8 x float> @intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32(<vscal
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1007,7 +1007,7 @@ define <vscale x 1 x double>  @intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64(<vscale x
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1031,7 +1031,7 @@ define <vscale x 1 x double> @intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64(<vsca
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1055,7 +1055,7 @@ define <vscale x 2 x double>  @intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64(<vscale x
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1079,7 +1079,7 @@ define <vscale x 2 x double> @intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64(<vsca
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1103,7 +1103,7 @@ define <vscale x 4 x double>  @intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64(<vscale x
 ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1127,7 +1127,7 @@ define <vscale x 4 x double> @intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64(<vsca
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll
index 438bf82a4691..a4d860054abb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16(
 define <vscale x 32 x half> @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32(
 define <vscale x 16 x float> @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.f16(
 define <vscale x 1 x half> @intrinsic_vfmul_mask_vf_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16.f16(
 define <vscale x 2 x half> @intrinsic_vfmul_mask_vf_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16.f16(
 define <vscale x 4 x half> @intrinsic_vfmul_mask_vf_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16.f16(
 define <vscale x 8 x half> @intrinsic_vfmul_mask_vf_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -628,7 +628,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16.f16(
 define <vscale x 16 x half> @intrinsic_vfmul_mask_vf_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -668,7 +668,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16.f16(
 define <vscale x 32 x half> @intrinsic_vfmul_mask_vf_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -708,7 +708,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.f32(
 define <vscale x 1 x float> @intrinsic_vfmul_mask_vf_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -748,7 +748,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.f32(
 define <vscale x 2 x float> @intrinsic_vfmul_mask_vf_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -788,7 +788,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.f32(
 define <vscale x 4 x float> @intrinsic_vfmul_mask_vf_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -828,7 +828,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.f32(
 define <vscale x 8 x float> @intrinsic_vfmul_mask_vf_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -868,7 +868,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.f32(
 define <vscale x 16 x float> @intrinsic_vfmul_mask_vf_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll
index a0e3517c6149..3220d1e6d0a7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16(
 define <vscale x 32 x half> @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32(
 define <vscale x 16 x float> @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64(
 define <vscale x 2 x double> @intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64(
 define <vscale x 4 x double> @intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64(
 define <vscale x 8 x double> @intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -628,7 +628,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.f16(
 define <vscale x 1 x half> @intrinsic_vfmul_mask_vf_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -668,7 +668,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16.f16(
 define <vscale x 2 x half> @intrinsic_vfmul_mask_vf_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -708,7 +708,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16.f16(
 define <vscale x 4 x half> @intrinsic_vfmul_mask_vf_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -748,7 +748,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16.f16(
 define <vscale x 8 x half> @intrinsic_vfmul_mask_vf_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -788,7 +788,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16.f16(
 define <vscale x 16 x half> @intrinsic_vfmul_mask_vf_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -828,7 +828,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16.f16(
 define <vscale x 32 x half> @intrinsic_vfmul_mask_vf_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -868,7 +868,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.f32(
 define <vscale x 1 x float> @intrinsic_vfmul_mask_vf_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -908,7 +908,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.f32(
 define <vscale x 2 x float> @intrinsic_vfmul_mask_vf_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -948,7 +948,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.f32(
 define <vscale x 4 x float> @intrinsic_vfmul_mask_vf_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -988,7 +988,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.f32(
 define <vscale x 8 x float> @intrinsic_vfmul_mask_vf_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.f32(
 define <vscale x 16 x float> @intrinsic_vfmul_mask_vf_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.f64(
 define <vscale x 1 x double> @intrinsic_vfmul_mask_vf_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.f64(
 define <vscale x 2 x double> @intrinsic_vfmul_mask_vf_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.f64(
 define <vscale x 4 x double> @intrinsic_vfmul_mask_vf_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.f64(
 define <vscale x 8 x double> @intrinsic_vfmul_mask_vf_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll
index d90664c7eb37..4c122c14df6d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -221,7 +221,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -244,7 +244,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -267,7 +267,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -290,7 +290,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -313,7 +313,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -336,7 +336,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -359,7 +359,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -384,7 +384,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -409,7 +409,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -433,7 +433,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -457,7 +457,7 @@ define <vscale x 1 x half> @intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -481,7 +481,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -505,7 +505,7 @@ define <vscale x 2 x half> @intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -529,7 +529,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -553,7 +553,7 @@ define <vscale x 4 x half> @intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -577,7 +577,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -601,7 +601,7 @@ define <vscale x 8 x half> @intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -625,7 +625,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -649,7 +649,7 @@ define <vscale x 16 x half> @intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16(<vs
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -673,7 +673,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -697,7 +697,7 @@ define <vscale x 1 x float> @intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -721,7 +721,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -745,7 +745,7 @@ define <vscale x 2 x float> @intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -769,7 +769,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -793,7 +793,7 @@ define <vscale x 4 x float> @intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -817,7 +817,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -841,7 +841,7 @@ define <vscale x 8 x float> @intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll
index 9e113de8e9ca..c5fd7912a884 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -221,7 +221,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -244,7 +244,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -267,7 +267,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -290,7 +290,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -313,7 +313,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -336,7 +336,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -359,7 +359,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -384,7 +384,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -409,7 +409,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -432,7 +432,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.nxv1f64(
 define <vscale x 1 x double>  @intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -455,7 +455,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64(
 define <vscale x 1 x double>  @intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -478,7 +478,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.nxv2f64(
 define <vscale x 2 x double>  @intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -501,7 +501,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64(
 define <vscale x 2 x double>  @intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -526,7 +526,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -551,7 +551,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -575,7 +575,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -599,7 +599,7 @@ define <vscale x 1 x half> @intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -623,7 +623,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -647,7 +647,7 @@ define <vscale x 2 x half> @intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -671,7 +671,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -695,7 +695,7 @@ define <vscale x 4 x half> @intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -719,7 +719,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -743,7 +743,7 @@ define <vscale x 8 x half> @intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -767,7 +767,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -791,7 +791,7 @@ define <vscale x 16 x half> @intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16(<vs
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -815,7 +815,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -839,7 +839,7 @@ define <vscale x 1 x float> @intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -863,7 +863,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -887,7 +887,7 @@ define <vscale x 2 x float> @intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -911,7 +911,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -935,7 +935,7 @@ define <vscale x 4 x float> @intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -959,7 +959,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -983,7 +983,7 @@ define <vscale x 8 x float> @intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1007,7 +1007,7 @@ define <vscale x 1 x double>  @intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64(<vscale
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1031,7 +1031,7 @@ define <vscale x 1 x double> @intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64(<vsc
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1055,7 +1055,7 @@ define <vscale x 2 x double>  @intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64(<vscale
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1079,7 +1079,7 @@ define <vscale x 2 x double> @intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64(<vsc
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1103,7 +1103,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64(<vscale
 ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1127,7 +1127,7 @@ define <vscale x 4 x double> @intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64(<vsc
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll
index abea8d688fa3..d1adf4ac4325 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -221,7 +221,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -244,7 +244,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -267,7 +267,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -290,7 +290,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -313,7 +313,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -336,7 +336,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -359,7 +359,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -384,7 +384,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -409,7 +409,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -433,7 +433,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -457,7 +457,7 @@ define <vscale x 1 x half> @intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -481,7 +481,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -505,7 +505,7 @@ define <vscale x 2 x half> @intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -529,7 +529,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -553,7 +553,7 @@ define <vscale x 4 x half> @intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -577,7 +577,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -601,7 +601,7 @@ define <vscale x 8 x half> @intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -625,7 +625,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -649,7 +649,7 @@ define <vscale x 16 x half> @intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16(<vs
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -673,7 +673,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -697,7 +697,7 @@ define <vscale x 1 x float> @intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -721,7 +721,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -745,7 +745,7 @@ define <vscale x 2 x float> @intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -769,7 +769,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -793,7 +793,7 @@ define <vscale x 4 x float> @intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -817,7 +817,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -841,7 +841,7 @@ define <vscale x 8 x float> @intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll
index 4b4b8136fe5a..3bd3925dc0d5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -221,7 +221,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -244,7 +244,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -267,7 +267,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -290,7 +290,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -313,7 +313,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -336,7 +336,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -359,7 +359,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -384,7 +384,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -409,7 +409,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -432,7 +432,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.nxv1f64(
 define <vscale x 1 x double>  @intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -455,7 +455,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64(
 define <vscale x 1 x double>  @intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -478,7 +478,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.nxv2f64(
 define <vscale x 2 x double>  @intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -501,7 +501,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64(
 define <vscale x 2 x double>  @intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -526,7 +526,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -551,7 +551,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -575,7 +575,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -599,7 +599,7 @@ define <vscale x 1 x half> @intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -623,7 +623,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -647,7 +647,7 @@ define <vscale x 2 x half> @intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -671,7 +671,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -695,7 +695,7 @@ define <vscale x 4 x half> @intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -719,7 +719,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -743,7 +743,7 @@ define <vscale x 8 x half> @intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -767,7 +767,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -791,7 +791,7 @@ define <vscale x 16 x half> @intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16(<vs
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -815,7 +815,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -839,7 +839,7 @@ define <vscale x 1 x float> @intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -863,7 +863,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -887,7 +887,7 @@ define <vscale x 2 x float> @intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -911,7 +911,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -935,7 +935,7 @@ define <vscale x 4 x float> @intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -959,7 +959,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -983,7 +983,7 @@ define <vscale x 8 x float> @intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1007,7 +1007,7 @@ define <vscale x 1 x double>  @intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64(<vscale
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1031,7 +1031,7 @@ define <vscale x 1 x double> @intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64(<vsc
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1055,7 +1055,7 @@ define <vscale x 2 x double>  @intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64(<vscale
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1079,7 +1079,7 @@ define <vscale x 2 x double> @intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64(<vsc
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1103,7 +1103,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64(<vscale
 ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1127,7 +1127,7 @@ define <vscale x 4 x double> @intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64(<vsc
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll
index 7b090881a336..6a62afaaf7f7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -221,7 +221,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -244,7 +244,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -267,7 +267,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -290,7 +290,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -313,7 +313,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -336,7 +336,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -359,7 +359,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -384,7 +384,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -409,7 +409,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -433,7 +433,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -457,7 +457,7 @@ define <vscale x 1 x half> @intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -481,7 +481,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -505,7 +505,7 @@ define <vscale x 2 x half> @intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -529,7 +529,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -553,7 +553,7 @@ define <vscale x 4 x half> @intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -577,7 +577,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -601,7 +601,7 @@ define <vscale x 8 x half> @intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -625,7 +625,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -649,7 +649,7 @@ define <vscale x 16 x half> @intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16(<vs
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -673,7 +673,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -697,7 +697,7 @@ define <vscale x 1 x float> @intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -721,7 +721,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -745,7 +745,7 @@ define <vscale x 2 x float> @intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -769,7 +769,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -793,7 +793,7 @@ define <vscale x 4 x float> @intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -817,7 +817,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -841,7 +841,7 @@ define <vscale x 8 x float> @intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll
index 23d83c29e247..011b71c5bb5b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -221,7 +221,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -244,7 +244,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -267,7 +267,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -290,7 +290,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -313,7 +313,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -336,7 +336,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -359,7 +359,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -384,7 +384,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -409,7 +409,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -432,7 +432,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.nxv1f64(
 define <vscale x 1 x double>  @intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -455,7 +455,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64(
 define <vscale x 1 x double>  @intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -478,7 +478,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.nxv2f64(
 define <vscale x 2 x double>  @intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -501,7 +501,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64(
 define <vscale x 2 x double>  @intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -526,7 +526,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -551,7 +551,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -575,7 +575,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -599,7 +599,7 @@ define <vscale x 1 x half> @intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -623,7 +623,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -647,7 +647,7 @@ define <vscale x 2 x half> @intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -671,7 +671,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -695,7 +695,7 @@ define <vscale x 4 x half> @intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -719,7 +719,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -743,7 +743,7 @@ define <vscale x 8 x half> @intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -767,7 +767,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -791,7 +791,7 @@ define <vscale x 16 x half> @intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16(<vs
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -815,7 +815,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -839,7 +839,7 @@ define <vscale x 1 x float> @intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -863,7 +863,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -887,7 +887,7 @@ define <vscale x 2 x float> @intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -911,7 +911,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -935,7 +935,7 @@ define <vscale x 4 x float> @intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -959,7 +959,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -983,7 +983,7 @@ define <vscale x 8 x float> @intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1007,7 +1007,7 @@ define <vscale x 1 x double>  @intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64(<vscale
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1031,7 +1031,7 @@ define <vscale x 1 x double> @intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64(<vsc
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1055,7 +1055,7 @@ define <vscale x 2 x double>  @intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64(<vscale
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1079,7 +1079,7 @@ define <vscale x 2 x double> @intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64(<vsc
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1103,7 +1103,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64(<vscale
 ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1127,7 +1127,7 @@ define <vscale x 4 x double> @intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64(<vsc
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll
index 4cbeb71a8653..b6a2a90aea75 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -221,7 +221,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -244,7 +244,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -267,7 +267,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -290,7 +290,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -313,7 +313,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -336,7 +336,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -359,7 +359,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -384,7 +384,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -409,7 +409,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -433,7 +433,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -457,7 +457,7 @@ define <vscale x 1 x half> @intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -481,7 +481,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -505,7 +505,7 @@ define <vscale x 2 x half> @intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -529,7 +529,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -553,7 +553,7 @@ define <vscale x 4 x half> @intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -577,7 +577,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -601,7 +601,7 @@ define <vscale x 8 x half> @intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -625,7 +625,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -649,7 +649,7 @@ define <vscale x 16 x half> @intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16(<vs
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -673,7 +673,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -697,7 +697,7 @@ define <vscale x 1 x float> @intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -721,7 +721,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -745,7 +745,7 @@ define <vscale x 2 x float> @intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -769,7 +769,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -793,7 +793,7 @@ define <vscale x 4 x float> @intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -817,7 +817,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -841,7 +841,7 @@ define <vscale x 8 x float> @intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll
index c25fee55ba36..bf88e4d15430 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16(
 define <vscale x 1 x half>  @intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16(
 define <vscale x 2 x half>  @intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16(
 define <vscale x 4 x half>  @intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16(
 define <vscale x 8 x half>  @intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -221,7 +221,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -244,7 +244,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -267,7 +267,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32(
 define <vscale x 1 x float>  @intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -290,7 +290,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -313,7 +313,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32(
 define <vscale x 2 x float>  @intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -336,7 +336,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -359,7 +359,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32(
 define <vscale x 4 x float>  @intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -384,7 +384,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -409,7 +409,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -432,7 +432,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.nxv1f64(
 define <vscale x 1 x double>  @intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -455,7 +455,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64(
 define <vscale x 1 x double>  @intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -478,7 +478,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.nxv2f64(
 define <vscale x 2 x double>  @intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v18, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -501,7 +501,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64(
 define <vscale x 2 x double>  @intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -526,7 +526,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v20, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -551,7 +551,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -575,7 +575,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -599,7 +599,7 @@ define <vscale x 1 x half> @intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -623,7 +623,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -647,7 +647,7 @@ define <vscale x 2 x half> @intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -671,7 +671,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -695,7 +695,7 @@ define <vscale x 4 x half> @intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -719,7 +719,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -743,7 +743,7 @@ define <vscale x 8 x half> @intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16(<vscal
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -767,7 +767,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -791,7 +791,7 @@ define <vscale x 16 x half> @intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16(<vs
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -815,7 +815,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -839,7 +839,7 @@ define <vscale x 1 x float> @intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -863,7 +863,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -887,7 +887,7 @@ define <vscale x 2 x float> @intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -911,7 +911,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -935,7 +935,7 @@ define <vscale x 4 x float> @intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -959,7 +959,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -983,7 +983,7 @@ define <vscale x 8 x float> @intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32(<vsca
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1007,7 +1007,7 @@ define <vscale x 1 x double>  @intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64(<vscale
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1031,7 +1031,7 @@ define <vscale x 1 x double> @intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64(<vsc
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1055,7 +1055,7 @@ define <vscale x 2 x double>  @intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64(<vscale
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1079,7 +1079,7 @@ define <vscale x 2 x double> @intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64(<vsc
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1103,7 +1103,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64(<vscale
 ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1127,7 +1127,7 @@ define <vscale x 4 x double> @intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64(<vsc
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

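For readers skimming the vfnmsub diffs above, the pattern under test reduces to the following minimal sketch (illustrative function and value names, not one of the committed tests): the vfnmsub destination register is tied to the accumulator operand, so the lowering now requests tail undisturbed instead of tail agnostic.

; Minimal sketch, not a committed test; names are illustrative.
declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
  <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64)

define <vscale x 1 x half> @sketch_tied_dest(<vscale x 1 x half> %acc, <vscale x 1 x half> %a, <vscale x 1 x half> %b, i64 %vl) nounwind {
  ; The result overwrites %acc in place, so tail elements of %acc remain
  ; visible to the caller; the expected lowering is now
  ;   vsetvli a0, a0, e16,mf4,tu,mu
  ;   vfnmsub.vv v16, v17, v18
  %r = call <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
    <vscale x 1 x half> %acc,
    <vscale x 1 x half> %a,
    <vscale x 1 x half> %b,
    i64 %vl)
  ret <vscale x 1 x half> %r
}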
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll
index b08d0c4bd918..1c5a31853a19 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
 define <vscale x 1 x half> @intrinsic_vfrdiv_mask_vf_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfrdiv.mask.nxv2f16.f16(
 define <vscale x 2 x half> @intrinsic_vfrdiv_mask_vf_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfrdiv.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfrdiv.mask.nxv4f16.f16(
 define <vscale x 4 x half> @intrinsic_vfrdiv_mask_vf_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfrdiv.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfrdiv.mask.nxv8f16.f16(
 define <vscale x 8 x half> @intrinsic_vfrdiv_mask_vf_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfrdiv.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfrdiv.mask.nxv16f16.f16(
 define <vscale x 16 x half> @intrinsic_vfrdiv_mask_vf_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfrdiv.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfrdiv.mask.nxv32f16.f16(
 define <vscale x 32 x half> @intrinsic_vfrdiv_mask_vf_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfrdiv.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfrdiv.mask.nxv1f32.f32(
 define <vscale x 1 x float> @intrinsic_vfrdiv_mask_vf_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfrdiv.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfrdiv.mask.nxv2f32.f32(
 define <vscale x 2 x float> @intrinsic_vfrdiv_mask_vf_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfrdiv.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfrdiv.mask.nxv4f32.f32(
 define <vscale x 4 x float> @intrinsic_vfrdiv_mask_vf_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfrdiv.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfrdiv.mask.nxv8f32.f32(
 define <vscale x 8 x float> @intrinsic_vfrdiv_mask_vf_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfrdiv.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfrdiv.mask.nxv16f32.f32(
 define <vscale x 16 x float> @intrinsic_vfrdiv_mask_vf_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfrdiv.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll
index 176bd9061b06..41354bb7ca4a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
 define <vscale x 1 x half> @intrinsic_vfrdiv_mask_vf_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfrdiv.mask.nxv2f16.f16(
 define <vscale x 2 x half> @intrinsic_vfrdiv_mask_vf_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfrdiv.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfrdiv.mask.nxv4f16.f16(
 define <vscale x 4 x half> @intrinsic_vfrdiv_mask_vf_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfrdiv.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfrdiv.mask.nxv8f16.f16(
 define <vscale x 8 x half> @intrinsic_vfrdiv_mask_vf_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfrdiv.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfrdiv.mask.nxv16f16.f16(
 define <vscale x 16 x half> @intrinsic_vfrdiv_mask_vf_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfrdiv.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfrdiv.mask.nxv32f16.f16(
 define <vscale x 32 x half> @intrinsic_vfrdiv_mask_vf_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfrdiv.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfrdiv.mask.nxv1f32.f32(
 define <vscale x 1 x float> @intrinsic_vfrdiv_mask_vf_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfrdiv.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfrdiv.mask.nxv2f32.f32(
 define <vscale x 2 x float> @intrinsic_vfrdiv_mask_vf_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfrdiv.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfrdiv.mask.nxv4f32.f32(
 define <vscale x 4 x float> @intrinsic_vfrdiv_mask_vf_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfrdiv.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfrdiv.mask.nxv8f32.f32(
 define <vscale x 8 x float> @intrinsic_vfrdiv_mask_vf_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfrdiv.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfrdiv.mask.nxv16f32.f32(
 define <vscale x 16 x float> @intrinsic_vfrdiv_mask_vf_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfrdiv.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfrdiv.mask.nxv1f64.f64(
 define <vscale x 1 x double> @intrinsic_vfrdiv_mask_vf_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfrdiv.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfrdiv.mask.nxv2f64.f64(
 define <vscale x 2 x double> @intrinsic_vfrdiv_mask_vf_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfrdiv.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfrdiv.mask.nxv4f64.f64(
 define <vscale x 4 x double> @intrinsic_vfrdiv_mask_vf_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfrdiv.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfrdiv.mask.nxv8f64.f64(
 define <vscale x 8 x double> @intrinsic_vfrdiv_mask_vf_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfrdiv.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

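Note that in the vfrdiv diffs above (and in the vfrsub and vfsgnj diffs below) only the masked variants change: the unmasked forms write a fresh destination with no tied operand, while the masked forms carry a merge operand tied to the destination. A minimal sketch of the masked shape (illustrative names, not a committed test):

; Minimal sketch, not a committed test; names are illustrative.
declare <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
  <vscale x 1 x half>, <vscale x 1 x half>, half, <vscale x 1 x i1>, i64)

define <vscale x 1 x half> @sketch_masked_merge(<vscale x 1 x half> %merge, <vscale x 1 x half> %v, half %s, <vscale x 1 x i1> %mask, i64 %vl) nounwind {
  ; %merge supplies the inactive elements (and, with tu, the tail elements),
  ; so the expected lowering is now
  ;   vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
  ;   vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
  %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
    <vscale x 1 x half> %merge,
    <vscale x 1 x half> %v,
    half %s,
    <vscale x 1 x i1> %mask,
    i64 %vl)
  ret <vscale x 1 x half> %a
}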
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll
index 509eb2da1b34..a6196865464d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll
@@ -29,7 +29,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfrsub.mask.nxv1f16.f16(
 define <vscale x 1 x half> @intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfrsub.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -69,7 +69,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfrsub.mask.nxv2f16.f16(
 define <vscale x 2 x half> @intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfrsub.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -109,7 +109,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfrsub.mask.nxv4f16.f16(
 define <vscale x 4 x half> @intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfrsub.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -149,7 +149,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfrsub.mask.nxv8f16.f16(
 define <vscale x 8 x half> @intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfrsub.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -189,7 +189,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfrsub.mask.nxv16f16.f16(
 define <vscale x 16 x half> @intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfrsub.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -229,7 +229,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfrsub.mask.nxv32f16.f16(
 define <vscale x 32 x half> @intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfrsub.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -269,7 +269,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32(
 define <vscale x 1 x float> @intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -309,7 +309,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfrsub.mask.nxv2f32.f32(
 define <vscale x 2 x float> @intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfrsub.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -349,7 +349,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfrsub.mask.nxv4f32.f32(
 define <vscale x 4 x float> @intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfrsub.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -389,7 +389,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfrsub.mask.nxv8f32.f32(
 define <vscale x 8 x float> @intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfrsub.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -429,7 +429,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfrsub.mask.nxv16f32.f32(
 define <vscale x 16 x float> @intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfrsub.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll
index 32d1736c7fc0..092fbea1a922 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll
@@ -29,7 +29,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfrsub.mask.nxv1f16.f16(
 define <vscale x 1 x half> @intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfrsub.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -69,7 +69,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfrsub.mask.nxv2f16.f16(
 define <vscale x 2 x half> @intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfrsub.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -109,7 +109,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfrsub.mask.nxv4f16.f16(
 define <vscale x 4 x half> @intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfrsub.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -149,7 +149,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfrsub.mask.nxv8f16.f16(
 define <vscale x 8 x half> @intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfrsub.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -189,7 +189,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfrsub.mask.nxv16f16.f16(
 define <vscale x 16 x half> @intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfrsub.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -229,7 +229,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfrsub.mask.nxv32f16.f16(
 define <vscale x 32 x half> @intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfrsub.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -269,7 +269,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32(
 define <vscale x 1 x float> @intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -309,7 +309,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfrsub.mask.nxv2f32.f32(
 define <vscale x 2 x float> @intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfrsub.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -349,7 +349,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfrsub.mask.nxv4f32.f32(
 define <vscale x 4 x float> @intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfrsub.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -389,7 +389,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfrsub.mask.nxv8f32.f32(
 define <vscale x 8 x float> @intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfrsub.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -429,7 +429,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfrsub.mask.nxv16f32.f32(
 define <vscale x 16 x float> @intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfrsub.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -469,7 +469,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfrsub.mask.nxv1f64.f64(
 define <vscale x 1 x double> @intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfrsub.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -509,7 +509,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfrsub.mask.nxv2f64.f64(
 define <vscale x 2 x double> @intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfrsub.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -549,7 +549,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfrsub.mask.nxv4f64.f64(
 define <vscale x 4 x double> @intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfrsub.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -589,7 +589,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfrsub.mask.nxv8f64.f64(
 define <vscale x 8 x double> @intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfrsub.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll
index 474e22555636..bfc666d6254b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16(
 define <vscale x 32 x half> @intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32(
 define <vscale x 16 x float> @intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.f16(
 define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vf_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16.f16(
 define <vscale x 2 x half> @intrinsic_vfsgnj_mask_vf_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16.f16(
 define <vscale x 4 x half> @intrinsic_vfsgnj_mask_vf_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16.f16(
 define <vscale x 8 x half> @intrinsic_vfsgnj_mask_vf_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -628,7 +628,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16.f16(
 define <vscale x 16 x half> @intrinsic_vfsgnj_mask_vf_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -668,7 +668,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16.f16(
 define <vscale x 32 x half> @intrinsic_vfsgnj_mask_vf_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -708,7 +708,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.f32(
 define <vscale x 1 x float> @intrinsic_vfsgnj_mask_vf_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -748,7 +748,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.f32(
 define <vscale x 2 x float> @intrinsic_vfsgnj_mask_vf_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -788,7 +788,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.f32(
 define <vscale x 4 x float> @intrinsic_vfsgnj_mask_vf_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -828,7 +828,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.f32(
 define <vscale x 8 x float> @intrinsic_vfsgnj_mask_vf_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -868,7 +868,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.f32(
 define <vscale x 16 x float> @intrinsic_vfsgnj_mask_vf_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll
index 0690d14fdc04..17dd102e7169 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16(
 define <vscale x 32 x half> @intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32(
 define <vscale x 16 x float> @intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64(
 define <vscale x 2 x double> @intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64(
 define <vscale x 4 x double> @intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64(
 define <vscale x 8 x double> @intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -628,7 +628,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.f16(
 define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vf_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -668,7 +668,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16.f16(
 define <vscale x 2 x half> @intrinsic_vfsgnj_mask_vf_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -708,7 +708,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16.f16(
 define <vscale x 4 x half> @intrinsic_vfsgnj_mask_vf_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -748,7 +748,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16.f16(
 define <vscale x 8 x half> @intrinsic_vfsgnj_mask_vf_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -788,7 +788,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16.f16(
 define <vscale x 16 x half> @intrinsic_vfsgnj_mask_vf_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -828,7 +828,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16.f16(
 define <vscale x 32 x half> @intrinsic_vfsgnj_mask_vf_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -868,7 +868,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.f32(
 define <vscale x 1 x float> @intrinsic_vfsgnj_mask_vf_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -908,7 +908,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.f32(
 define <vscale x 2 x float> @intrinsic_vfsgnj_mask_vf_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -948,7 +948,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.f32(
 define <vscale x 4 x float> @intrinsic_vfsgnj_mask_vf_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -988,7 +988,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.f32(
 define <vscale x 8 x float> @intrinsic_vfsgnj_mask_vf_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.f32(
 define <vscale x 16 x float> @intrinsic_vfsgnj_mask_vf_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64.f64(
 define <vscale x 1 x double> @intrinsic_vfsgnj_mask_vf_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.f64(
 define <vscale x 2 x double> @intrinsic_vfsgnj_mask_vf_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.f64(
 define <vscale x 4 x double> @intrinsic_vfsgnj_mask_vf_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.f64(
 define <vscale x 8 x double> @intrinsic_vfsgnj_mask_vf_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll
index bfdf8077515e..9831d2b98d65 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16(
 define <vscale x 32 x half> @intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32(
 define <vscale x 16 x float> @intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16.f16(
 define <vscale x 1 x half> @intrinsic_vfsgnjn_mask_vf_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16.f16(
 define <vscale x 2 x half> @intrinsic_vfsgnjn_mask_vf_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16.f16(
 define <vscale x 4 x half> @intrinsic_vfsgnjn_mask_vf_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16.f16(
 define <vscale x 8 x half> @intrinsic_vfsgnjn_mask_vf_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -628,7 +628,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16.f16(
 define <vscale x 16 x half> @intrinsic_vfsgnjn_mask_vf_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -668,7 +668,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16.f16(
 define <vscale x 32 x half> @intrinsic_vfsgnjn_mask_vf_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -708,7 +708,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.f32(
 define <vscale x 1 x float> @intrinsic_vfsgnjn_mask_vf_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -748,7 +748,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.f32(
 define <vscale x 2 x float> @intrinsic_vfsgnjn_mask_vf_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -788,7 +788,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.f32(
 define <vscale x 4 x float> @intrinsic_vfsgnjn_mask_vf_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -828,7 +828,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.f32(
 define <vscale x 8 x float> @intrinsic_vfsgnjn_mask_vf_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -868,7 +868,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.f32(
 define <vscale x 16 x float> @intrinsic_vfsgnjn_mask_vf_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll
index b8a2e2bbc42e..ba9b36d3385d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16(
 define <vscale x 32 x half> @intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32(
 define <vscale x 16 x float> @intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64(
 define <vscale x 2 x double> @intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64(
 define <vscale x 4 x double> @intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64(
 define <vscale x 8 x double> @intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -628,7 +628,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16.f16(
 define <vscale x 1 x half> @intrinsic_vfsgnjn_mask_vf_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -668,7 +668,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16.f16(
 define <vscale x 2 x half> @intrinsic_vfsgnjn_mask_vf_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -708,7 +708,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16.f16(
 define <vscale x 4 x half> @intrinsic_vfsgnjn_mask_vf_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -748,7 +748,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16.f16(
 define <vscale x 8 x half> @intrinsic_vfsgnjn_mask_vf_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -788,7 +788,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16.f16(
 define <vscale x 16 x half> @intrinsic_vfsgnjn_mask_vf_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -828,7 +828,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16.f16(
 define <vscale x 32 x half> @intrinsic_vfsgnjn_mask_vf_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -868,7 +868,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.f32(
 define <vscale x 1 x float> @intrinsic_vfsgnjn_mask_vf_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -908,7 +908,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.f32(
 define <vscale x 2 x float> @intrinsic_vfsgnjn_mask_vf_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -948,7 +948,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.f32(
 define <vscale x 4 x float> @intrinsic_vfsgnjn_mask_vf_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -988,7 +988,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.f32(
 define <vscale x 8 x float> @intrinsic_vfsgnjn_mask_vf_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.f32(
 define <vscale x 16 x float> @intrinsic_vfsgnjn_mask_vf_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64.f64(
 define <vscale x 1 x double> @intrinsic_vfsgnjn_mask_vf_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64.f64(
 define <vscale x 2 x double> @intrinsic_vfsgnjn_mask_vf_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.f64(
 define <vscale x 4 x double> @intrinsic_vfsgnjn_mask_vf_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.f64(
 define <vscale x 8 x double> @intrinsic_vfsgnjn_mask_vf_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll
index 88102f157324..2d7b198ce63a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16(
 define <vscale x 32 x half> @intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32(
 define <vscale x 16 x float> @intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16.f16(
 define <vscale x 1 x half> @intrinsic_vfsgnjx_mask_vf_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16.f16(
 define <vscale x 2 x half> @intrinsic_vfsgnjx_mask_vf_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16.f16(
 define <vscale x 4 x half> @intrinsic_vfsgnjx_mask_vf_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16.f16(
 define <vscale x 8 x half> @intrinsic_vfsgnjx_mask_vf_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -628,7 +628,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16.f16(
 define <vscale x 16 x half> @intrinsic_vfsgnjx_mask_vf_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -668,7 +668,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16.f16(
 define <vscale x 32 x half> @intrinsic_vfsgnjx_mask_vf_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -708,7 +708,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.f32(
 define <vscale x 1 x float> @intrinsic_vfsgnjx_mask_vf_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -748,7 +748,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.f32(
 define <vscale x 2 x float> @intrinsic_vfsgnjx_mask_vf_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -788,7 +788,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.f32(
 define <vscale x 4 x float> @intrinsic_vfsgnjx_mask_vf_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -828,7 +828,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.f32(
 define <vscale x 8 x float> @intrinsic_vfsgnjx_mask_vf_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -868,7 +868,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.f32(
 define <vscale x 16 x float> @intrinsic_vfsgnjx_mask_vf_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll
index a6df3ba7a16a..752535e61423 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16(
 define <vscale x 32 x half> @intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32(
 define <vscale x 16 x float> @intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64(
 define <vscale x 2 x double> @intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64(
 define <vscale x 4 x double> @intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64(
 define <vscale x 8 x double> @intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -628,7 +628,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16.f16(
 define <vscale x 1 x half> @intrinsic_vfsgnjx_mask_vf_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -668,7 +668,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16.f16(
 define <vscale x 2 x half> @intrinsic_vfsgnjx_mask_vf_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -708,7 +708,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16.f16(
 define <vscale x 4 x half> @intrinsic_vfsgnjx_mask_vf_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -748,7 +748,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16.f16(
 define <vscale x 8 x half> @intrinsic_vfsgnjx_mask_vf_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -788,7 +788,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16.f16(
 define <vscale x 16 x half> @intrinsic_vfsgnjx_mask_vf_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -828,7 +828,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16.f16(
 define <vscale x 32 x half> @intrinsic_vfsgnjx_mask_vf_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -868,7 +868,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.f32(
 define <vscale x 1 x float> @intrinsic_vfsgnjx_mask_vf_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -908,7 +908,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.f32(
 define <vscale x 2 x float> @intrinsic_vfsgnjx_mask_vf_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -948,7 +948,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.f32(
 define <vscale x 4 x float> @intrinsic_vfsgnjx_mask_vf_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -988,7 +988,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.f32(
 define <vscale x 8 x float> @intrinsic_vfsgnjx_mask_vf_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.f32(
 define <vscale x 16 x float> @intrinsic_vfsgnjx_mask_vf_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64.f64(
 define <vscale x 1 x double> @intrinsic_vfsgnjx_mask_vf_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64.f64(
 define <vscale x 2 x double> @intrinsic_vfsgnjx_mask_vf_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64.f64(
 define <vscale x 4 x double> @intrinsic_vfsgnjx_mask_vf_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64.f64(
 define <vscale x 8 x double> @intrinsic_vfsgnjx_mask_vf_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll
index c1e49e56602d..ffe994199c86 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll
@@ -33,7 +33,7 @@ define <vscale x 1 x half> @intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16(<
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v16, v17, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ define <vscale x 2 x half> @intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16(<
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v16, v17, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ define <vscale x 4 x half> @intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16(<
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v16, v17, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ define <vscale x 8 x half> @intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16(<
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v16, v18, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -217,7 +217,7 @@ define <vscale x 16 x half> @intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f1
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v16, v20, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -265,7 +265,7 @@ define <vscale x 32 x half> @intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f1
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    fmv.h.x ft0, a1
-; CHECK-NEXT:    vsetvli a0, a2, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v16, v8, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -311,7 +311,7 @@ define <vscale x 1 x float> @intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32(
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v16, v17, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -357,7 +357,7 @@ define <vscale x 2 x float> @intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32(
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v16, v17, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -403,7 +403,7 @@ define <vscale x 4 x float> @intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32(
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v16, v18, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -449,7 +449,7 @@ define <vscale x 8 x float> @intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32(
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v16, v20, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -497,7 +497,7 @@ define <vscale x 16 x float> @intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    fmv.w.x ft0, a1
-; CHECK-NEXT:    vsetvli a0, a2, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v16, v8, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll
index 0344da54fe43..72896e58bc18 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll
@@ -33,7 +33,7 @@ define <vscale x 1 x half> @intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16(<
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v16, v17, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ define <vscale x 2 x half> @intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16(<
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v16, v17, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ define <vscale x 4 x half> @intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16(<
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v16, v17, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ define <vscale x 8 x half> @intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16(<
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v16, v18, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -217,7 +217,7 @@ define <vscale x 16 x half> @intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f1
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v16, v20, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -265,7 +265,7 @@ define <vscale x 32 x half> @intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f1
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    fmv.h.x ft0, a1
-; CHECK-NEXT:    vsetvli a0, a2, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v16, v8, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -311,7 +311,7 @@ define <vscale x 1 x float> @intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32(
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v16, v17, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -357,7 +357,7 @@ define <vscale x 2 x float> @intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32(
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v16, v17, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -403,7 +403,7 @@ define <vscale x 4 x float> @intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32(
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v16, v18, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -449,7 +449,7 @@ define <vscale x 8 x float> @intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32(
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v16, v20, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -497,7 +497,7 @@ define <vscale x 16 x float> @intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    fmv.w.x ft0, a1
-; CHECK-NEXT:    vsetvli a0, a2, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v16, v8, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -543,7 +543,7 @@ define <vscale x 1 x double> @intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v16, v17, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -589,7 +589,7 @@ define <vscale x 2 x double> @intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v16, v18, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -635,7 +635,7 @@ define <vscale x 4 x double> @intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64
 ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v16, v20, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -683,7 +683,7 @@ define <vscale x 8 x double> @intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64
 ; CHECK-NEXT:    vsetvli a3, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    fmv.d.x ft0, a1
-; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v16, v8, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll
index ce849b6d286e..7147ebb6f85a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll
@@ -34,7 +34,7 @@ define <vscale x 1 x half> @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16(<vs
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v17, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -81,7 +81,7 @@ define <vscale x 2 x half> @intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16(<vs
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v17, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -128,7 +128,7 @@ define <vscale x 4 x half> @intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16(<vs
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v17, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -175,7 +175,7 @@ define <vscale x 8 x half> @intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16(<vs
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v18, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -222,7 +222,7 @@ define <vscale x 16 x half> @intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16(
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v20, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -271,7 +271,7 @@ define <vscale x 32 x half> @intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16(
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    fmv.h.x ft0, a1
-; CHECK-NEXT:    vsetvli a0, a2, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v8, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -318,7 +318,7 @@ define <vscale x 1 x float> @intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32(<v
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v17, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -365,7 +365,7 @@ define <vscale x 2 x float> @intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32(<v
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v17, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -412,7 +412,7 @@ define <vscale x 4 x float> @intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32(<v
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v18, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -459,7 +459,7 @@ define <vscale x 8 x float> @intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32(<v
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v20, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -508,7 +508,7 @@ define <vscale x 16 x float> @intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    fmv.w.x ft0, a1
-; CHECK-NEXT:    vsetvli a0, a2, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v8, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll
index 765e186116ae..7ef44ad36d78 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll
@@ -34,7 +34,7 @@ define <vscale x 1 x half> @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16(<vs
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v17, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -81,7 +81,7 @@ define <vscale x 2 x half> @intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16(<vs
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v17, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -128,7 +128,7 @@ define <vscale x 4 x half> @intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16(<vs
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v17, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -175,7 +175,7 @@ define <vscale x 8 x half> @intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16(<vs
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v18, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -222,7 +222,7 @@ define <vscale x 16 x half> @intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16(
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v20, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -271,7 +271,7 @@ define <vscale x 32 x half> @intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16(
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    fmv.h.x ft0, a1
-; CHECK-NEXT:    vsetvli a0, a2, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v8, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -318,7 +318,7 @@ define <vscale x 1 x float> @intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32(<v
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v17, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -365,7 +365,7 @@ define <vscale x 2 x float> @intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32(<v
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v17, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -412,7 +412,7 @@ define <vscale x 4 x float> @intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32(<v
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v18, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -459,7 +459,7 @@ define <vscale x 8 x float> @intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32(<v
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v20, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -508,7 +508,7 @@ define <vscale x 16 x float> @intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    fmv.w.x ft0, a1
-; CHECK-NEXT:    vsetvli a0, a2, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v8, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -555,7 +555,7 @@ define <vscale x 1 x double> @intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64(<
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v17, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -602,7 +602,7 @@ define <vscale x 2 x double> @intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64(<
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v18, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -649,7 +649,7 @@ define <vscale x 4 x double> @intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64(<
 ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v20, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -698,7 +698,7 @@ define <vscale x 8 x double> @intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64(<
 ; CHECK-NEXT:    vsetvli a3, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    fmv.d.x ft0, a1
-; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v8, ft0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

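For context, the masked vfslide1up form that all of the hunks above exercise looks like the sketch below. This is illustrative only: the operand order is assumed to match the vfsub.mask declarations later in this patch (the actual declarations sit outside the quoted hunks), and the comments are mine.

declare <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
  <vscale x 1 x half>,   ; merge operand, tied to the destination
  <vscale x 1 x half>,   ; source vector, slid up by one element
  half,                  ; scalar written to element 0
  <vscale x 1 x i1>,     ; mask (v0)
  i32)                   ; avl

Because the destination is tied to the merge operand, the caller can observe every element the instruction does not write, which is why the vsetvli in these tests now requests tu rather than ta.
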
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll
index 33c0b319c439..14ccd4ce867a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll
@@ -29,7 +29,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -69,7 +69,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -109,7 +109,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -149,7 +149,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -189,7 +189,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -229,7 +229,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16(
 define <vscale x 32 x half> @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -269,7 +269,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -309,7 +309,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -349,7 +349,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -389,7 +389,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -429,7 +429,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32(
 define <vscale x 16 x float> @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -469,7 +469,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.f16(
 define <vscale x 1 x half> @intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -509,7 +509,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.f16(
 define <vscale x 2 x half> @intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -549,7 +549,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.f16(
 define <vscale x 4 x half> @intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -589,7 +589,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.f16(
 define <vscale x 8 x half> @intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -629,7 +629,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.f16(
 define <vscale x 16 x half> @intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -669,7 +669,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.f16(
 define <vscale x 32 x half> @intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -709,7 +709,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32(
 define <vscale x 1 x float> @intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -749,7 +749,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32(
 define <vscale x 2 x float> @intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -789,7 +789,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32(
 define <vscale x 4 x float> @intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -829,7 +829,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32(
 define <vscale x 8 x float> @intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -869,7 +869,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32(
 define <vscale x 16 x float> @intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,

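Condensed, each vfsub test updated above has the shape below (the function and value names here are illustrative, not taken from the test file):

declare <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  i32)

define <vscale x 1 x half> @vfsub_tu_sketch(<vscale x 1 x half> %merge, <vscale x 1 x half> %op1, half %op2, <vscale x 1 x i1> %mask, i32 %vl) nounwind {
entry:
; %merge is tied to the destination register, so tail elements
; (indices >= %vl) and, under mu, masked-off elements all come from
; %merge and must be left undisturbed.
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
  %a = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.f16(
    <vscale x 1 x half> %merge,
    <vscale x 1 x half> %op1,
    half %op2,
    <vscale x 1 x i1> %mask,
    i32 %vl)
  ret <vscale x 1 x half> %a
}
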
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll
index 79b915f276f3..9b5098f281c5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll
@@ -29,7 +29,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -69,7 +69,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -109,7 +109,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -149,7 +149,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -189,7 +189,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -229,7 +229,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16(
 define <vscale x 32 x half> @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -269,7 +269,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -309,7 +309,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -349,7 +349,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -389,7 +389,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -429,7 +429,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32(
 define <vscale x 16 x float> @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -469,7 +469,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -509,7 +509,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64(
 define <vscale x 2 x double> @intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -549,7 +549,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64(
 define <vscale x 4 x double> @intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -589,7 +589,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64(
 define <vscale x 8 x double> @intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -629,7 +629,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.f16(
 define <vscale x 1 x half> @intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -669,7 +669,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.f16(
 define <vscale x 2 x half> @intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -709,7 +709,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.f16(
 define <vscale x 4 x half> @intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -749,7 +749,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.f16(
 define <vscale x 8 x half> @intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -789,7 +789,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.f16(
 define <vscale x 16 x half> @intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -829,7 +829,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.f16(
 define <vscale x 32 x half> @intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -869,7 +869,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32(
 define <vscale x 1 x float> @intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -909,7 +909,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32(
 define <vscale x 2 x float> @intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -949,7 +949,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32(
 define <vscale x 4 x float> @intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -989,7 +989,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32(
 define <vscale x 8 x float> @intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1029,7 +1029,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32(
 define <vscale x 16 x float> @intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1069,7 +1069,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.f64(
 define <vscale x 1 x double> @intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1109,7 +1109,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.f64(
 define <vscale x 2 x double> @intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1149,7 +1149,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.f64(
 define <vscale x 4 x double> @intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1189,7 +1189,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.f64(
 define <vscale x 8 x double> @intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll
index 265d33ebb526..943f2009181c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16(
 define <vscale x 1 x float> @intrinsic_vfwadd_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16(
     <vscale x 1 x float> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16(
 define <vscale x 2 x float> @intrinsic_vfwadd_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16(
     <vscale x 2 x float> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16(
 define <vscale x 4 x float> @intrinsic_vfwadd_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16(
     <vscale x 4 x float> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16(
 define <vscale x 8 x float> @intrinsic_vfwadd_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16(
     <vscale x 8 x float> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16(
 define <vscale x 16 x float> @intrinsic_vfwadd_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16(
     <vscale x 16 x float> %0,
@@ -228,7 +228,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16.f16(
 define <vscale x 1 x float> @intrinsic_vfwadd_mask_vf_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16.f16(
     <vscale x 1 x float> %0,
@@ -268,7 +268,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16.f16(
 define <vscale x 2 x float> @intrinsic_vfwadd_mask_vf_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16.f16(
     <vscale x 2 x float> %0,
@@ -308,7 +308,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16.f16(
 define <vscale x 4 x float> @intrinsic_vfwadd_mask_vf_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16.f16(
     <vscale x 4 x float> %0,
@@ -348,7 +348,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16.f16(
 define <vscale x 8 x float> @intrinsic_vfwadd_mask_vf_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16.f16(
     <vscale x 8 x float> %0,
@@ -388,7 +388,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16.f16(
 define <vscale x 16 x float> @intrinsic_vfwadd_mask_vf_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16.f16(
     <vscale x 16 x float> %0,

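The widening forms change the same way. The merge operand already has the widened result type, while the vsetvli is still emitted for the narrow source SEW; only the tail policy flips from ta to tu. A minimal sketch (names illustrative):

declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  i32)

define <vscale x 1 x float> @vfwadd_tu_sketch(<vscale x 1 x float> %merge, <vscale x 1 x half> %a, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %vl) nounwind {
entry:
; Result elements are 2*SEW wide, but the configuration still uses
; the source element type (e16); tu preserves the tail of %merge.
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
  %r = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16(
    <vscale x 1 x float> %merge,
    <vscale x 1 x half> %a,
    <vscale x 1 x half> %b,
    <vscale x 1 x i1> %m,
    i32 %vl)
  ret <vscale x 1 x float> %r
}
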
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll
index 0a7b41f64cf3..3e7fb6474b30 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16(
 define <vscale x 1 x float> @intrinsic_vfwadd_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16(
     <vscale x 1 x float> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16(
 define <vscale x 2 x float> @intrinsic_vfwadd_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16(
     <vscale x 2 x float> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16(
 define <vscale x 4 x float> @intrinsic_vfwadd_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16(
     <vscale x 4 x float> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16(
 define <vscale x 8 x float> @intrinsic_vfwadd_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16(
     <vscale x 8 x float> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16(
 define <vscale x 16 x float> @intrinsic_vfwadd_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16(
     <vscale x 16 x float> %0,
@@ -228,7 +228,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f32(
 define <vscale x 1 x double> @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f32(
     <vscale x 1 x double> %0,
@@ -268,7 +268,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f32(
 define <vscale x 2 x double> @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f32(
     <vscale x 2 x double> %0,
@@ -308,7 +308,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f32(
 define <vscale x 4 x double> @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f32(
     <vscale x 4 x double> %0,
@@ -348,7 +348,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f32(
 define <vscale x 8 x double> @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f32(
     <vscale x 8 x double> %0,
@@ -388,7 +388,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16.f16(
 define <vscale x 1 x float> @intrinsic_vfwadd_mask_vf_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16.f16(
     <vscale x 1 x float> %0,
@@ -428,7 +428,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16.f16(
 define <vscale x 2 x float> @intrinsic_vfwadd_mask_vf_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16.f16(
     <vscale x 2 x float> %0,
@@ -468,7 +468,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16.f16(
 define <vscale x 4 x float> @intrinsic_vfwadd_mask_vf_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16.f16(
     <vscale x 4 x float> %0,
@@ -508,7 +508,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16.f16(
 define <vscale x 8 x float> @intrinsic_vfwadd_mask_vf_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16.f16(
     <vscale x 8 x float> %0,
@@ -548,7 +548,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16.f16(
 define <vscale x 16 x float> @intrinsic_vfwadd_mask_vf_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16.f16(
     <vscale x 16 x float> %0,
@@ -588,7 +588,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f32.f32(
 define <vscale x 1 x double> @intrinsic_vfwadd_mask_vf_nxv1f32_f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f32.f32(
     <vscale x 1 x double> %0,
@@ -628,7 +628,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f32.f32(
 define <vscale x 2 x double> @intrinsic_vfwadd_mask_vf_nxv2f32_f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f32.f32(
     <vscale x 2 x double> %0,
@@ -668,7 +668,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f32.f32(
 define <vscale x 4 x double> @intrinsic_vfwadd_mask_vf_nxv4f32_f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f32.f32(
     <vscale x 4 x double> %0,
@@ -708,7 +708,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f32.f32(
 define <vscale x 8 x double> @intrinsic_vfwadd_mask_vf_nxv8f32_f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f32.f32(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
index 6537f2cdf66f..fae0b94a6c0b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f16(
 define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f16(
     <vscale x 1 x float> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f16(
 define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f16(
     <vscale x 2 x float> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f16(
 define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f16(
     <vscale x 4 x float> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f16(
 define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f16(
     <vscale x 8 x float> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f16(
 define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f16(
     <vscale x 16 x float> %0,
@@ -228,7 +228,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.f16(
 define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wf_nxv1f32_f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -268,7 +268,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.f16(
 define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wf_nxv2f32_f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -308,7 +308,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.f16(
 define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wf_nxv4f32_f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -348,7 +348,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.f16(
 define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wf_nxv8f32_f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -388,7 +388,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.f16(
 define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wf_nxv16f32_f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.f16(
     <vscale x 16 x float> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll
index f77d6e4805bb..02ab0ad81baf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f16(
 define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f16(
     <vscale x 1 x float> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f16(
 define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f16(
     <vscale x 2 x float> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f16(
 define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f16(
     <vscale x 4 x float> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f16(
 define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f16(
     <vscale x 8 x float> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f16(
 define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f16(
     <vscale x 16 x float> %0,
@@ -228,7 +228,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f32(
 define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f32(
     <vscale x 1 x double> %0,
@@ -268,7 +268,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f32(
 define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f32(
     <vscale x 2 x double> %0,
@@ -308,7 +308,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f32(
 define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f32(
     <vscale x 4 x double> %0,
@@ -348,7 +348,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f32(
 define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f32(
     <vscale x 8 x double> %0,
@@ -388,7 +388,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.f16(
 define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wf_nxv1f32_f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -428,7 +428,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.f16(
 define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wf_nxv2f32_f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -468,7 +468,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.f16(
 define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wf_nxv4f32_f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -508,7 +508,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.f16(
 define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wf_nxv8f32_f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -548,7 +548,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.f16(
 define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wf_nxv16f32_f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -588,7 +588,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.f32(
 define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wf_nxv1f64_f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f64_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -628,7 +628,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.f32(
 define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wf_nxv2f64_f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f64_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -668,7 +668,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.f32(
 define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wf_nxv4f64_f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f64_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -708,7 +708,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.f32(
 define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wf_nxv8f64_f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f64_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.f32(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll
index 6825ecb121b3..ec934b3bf12d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16(
 define <vscale x 1 x float>  @intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(
 define <vscale x 1 x float>  @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2f16(
 define <vscale x 2 x float>  @intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16(
 define <vscale x 2 x float>  @intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4f16(
 define <vscale x 4 x float>  @intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v16, v18, v19
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16(
 define <vscale x 4 x float>  @intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v16, v18, v19, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8f16(
 define <vscale x 8 x float>  @intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v16, v20, v22
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16(
 define <vscale x 8 x float>  @intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v16, v20, v22, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -197,7 +197,7 @@ define <vscale x 16 x float>  @intrinsic_vfwmacc_vv_nxv16f32_nxv16f16_nxv16f16(<
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v16, v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -223,7 +223,7 @@ define <vscale x 16 x float>  @intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v16, v8, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -247,7 +247,7 @@ define <vscale x 1 x float>  @intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -271,7 +271,7 @@ define <vscale x 1 x float> @intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16(<vsca
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -295,7 +295,7 @@ define <vscale x 2 x float>  @intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -319,7 +319,7 @@ define <vscale x 2 x float> @intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16(<vsca
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -343,7 +343,7 @@ define <vscale x 4 x float>  @intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -367,7 +367,7 @@ define <vscale x 4 x float> @intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16(<vsca
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -391,7 +391,7 @@ define <vscale x 8 x float>  @intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -415,7 +415,7 @@ define <vscale x 8 x float> @intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16(<vsca
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -441,7 +441,7 @@ define <vscale x 16 x float>  @intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16(<vscal
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v16, ft0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -467,7 +467,7 @@ define <vscale x 16 x float> @intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16(<v
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v16, ft0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll
index ccd970ef6af3..25449e11da62 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16(
 define <vscale x 1 x float>  @intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(
 define <vscale x 1 x float>  @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2f16(
 define <vscale x 2 x float>  @intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16(
 define <vscale x 2 x float>  @intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4f16(
 define <vscale x 4 x float>  @intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v16, v18, v19
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16(
 define <vscale x 4 x float>  @intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v16, v18, v19, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8f16(
 define <vscale x 8 x float>  @intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v16, v20, v22
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16(
 define <vscale x 8 x float>  @intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v16, v20, v22, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -197,7 +197,7 @@ define <vscale x 16 x float>  @intrinsic_vfwmacc_vv_nxv16f32_nxv16f16_nxv16f16(<
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v16, v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -223,7 +223,7 @@ define <vscale x 16 x float>  @intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v16, v8, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -246,7 +246,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.nxv1f32(
 define <vscale x 1 x double>  @intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -269,7 +269,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32(
 define <vscale x 1 x double>  @intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -292,7 +292,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.nxv2f32(
 define <vscale x 2 x double>  @intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v16, v18, v19
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -315,7 +315,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32(
 define <vscale x 2 x double>  @intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v16, v18, v19, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -338,7 +338,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.nxv4f32(
 define <vscale x 4 x double>  @intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v16, v20, v22
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -361,7 +361,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32(
 define <vscale x 4 x double>  @intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v16, v20, v22, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -387,7 +387,7 @@ define <vscale x 8 x double>  @intrinsic_vfwmacc_vv_nxv8f64_nxv8f32_nxv8f32(<vsc
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v16, v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -413,7 +413,7 @@ define <vscale x 8 x double>  @intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v16, v8, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -437,7 +437,7 @@ define <vscale x 1 x float>  @intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -461,7 +461,7 @@ define <vscale x 1 x float> @intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16(<vsca
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -485,7 +485,7 @@ define <vscale x 2 x float>  @intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -509,7 +509,7 @@ define <vscale x 2 x float> @intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16(<vsca
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -533,7 +533,7 @@ define <vscale x 4 x float>  @intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -557,7 +557,7 @@ define <vscale x 4 x float> @intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16(<vsca
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -581,7 +581,7 @@ define <vscale x 8 x float>  @intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -605,7 +605,7 @@ define <vscale x 8 x float> @intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16(<vsca
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -631,7 +631,7 @@ define <vscale x 16 x float>  @intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16(<vscal
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v16, ft0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -657,7 +657,7 @@ define <vscale x 16 x float> @intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16(<v
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v16, ft0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -681,7 +681,7 @@ define <vscale x 1 x double>  @intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32(<vscale
 ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -705,7 +705,7 @@ define <vscale x 1 x double> @intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32(<vsc
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -729,7 +729,7 @@ define <vscale x 2 x double>  @intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32(<vscale
 ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -753,7 +753,7 @@ define <vscale x 2 x double> @intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32(<vsc
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -777,7 +777,7 @@ define <vscale x 4 x double>  @intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32(<vscale
 ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -801,7 +801,7 @@ define <vscale x 4 x double> @intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32(<vsc
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -827,7 +827,7 @@ define <vscale x 8 x double>  @intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32(<vscale
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v16, ft0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -853,7 +853,7 @@ define <vscale x 8 x double> @intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32(<vsc
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v16, ft0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll
index 27b22f94f12a..decd4c73f2f4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1f16(
 define <vscale x 1 x float>  @intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16(
 define <vscale x 1 x float>  @intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2f16(
 define <vscale x 2 x float>  @intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16(
 define <vscale x 2 x float>  @intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4f16(
 define <vscale x 4 x float>  @intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v16, v18, v19
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16(
 define <vscale x 4 x float>  @intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v16, v18, v19, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8f16(
 define <vscale x 8 x float>  @intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v16, v20, v22
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16(
 define <vscale x 8 x float>  @intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v16, v20, v22, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -197,7 +197,7 @@ define <vscale x 16 x float>  @intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16(<
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v16, v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -223,7 +223,7 @@ define <vscale x 16 x float>  @intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v16, v8, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -247,7 +247,7 @@ define <vscale x 1 x float>  @intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -271,7 +271,7 @@ define <vscale x 1 x float> @intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16(<vsca
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -295,7 +295,7 @@ define <vscale x 2 x float>  @intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -319,7 +319,7 @@ define <vscale x 2 x float> @intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16(<vsca
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -343,7 +343,7 @@ define <vscale x 4 x float>  @intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -367,7 +367,7 @@ define <vscale x 4 x float> @intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16(<vsca
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -391,7 +391,7 @@ define <vscale x 8 x float>  @intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -415,7 +415,7 @@ define <vscale x 8 x float> @intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16(<vsca
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -441,7 +441,7 @@ define <vscale x 16 x float>  @intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16(<vscal
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v16, ft0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -467,7 +467,7 @@ define <vscale x 16 x float> @intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16(<v
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v16, ft0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll
index b5149780a79e..3a139a45926c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1f16(
 define <vscale x 1 x float>  @intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16(
 define <vscale x 1 x float>  @intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2f16(
 define <vscale x 2 x float>  @intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16(
 define <vscale x 2 x float>  @intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4f16(
 define <vscale x 4 x float>  @intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v16, v18, v19
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16(
 define <vscale x 4 x float>  @intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v16, v18, v19, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8f16(
 define <vscale x 8 x float>  @intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v16, v20, v22
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16(
 define <vscale x 8 x float>  @intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v16, v20, v22, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -197,7 +197,7 @@ define <vscale x 16 x float>  @intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16(<
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v16, v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -223,7 +223,7 @@ define <vscale x 16 x float>  @intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v16, v8, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -246,7 +246,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.nxv1f32(
 define <vscale x 1 x double>  @intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -269,7 +269,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32(
 define <vscale x 1 x double>  @intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -292,7 +292,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.nxv2f32(
 define <vscale x 2 x double>  @intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v16, v18, v19
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -315,7 +315,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32(
 define <vscale x 2 x double>  @intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v16, v18, v19, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -338,7 +338,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.nxv4f32(
 define <vscale x 4 x double>  @intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v16, v20, v22
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -361,7 +361,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32(
 define <vscale x 4 x double>  @intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v16, v20, v22, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -387,7 +387,7 @@ define <vscale x 8 x double>  @intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32(<vsc
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v16, v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -413,7 +413,7 @@ define <vscale x 8 x double>  @intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v16, v8, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -437,7 +437,7 @@ define <vscale x 1 x float>  @intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -461,7 +461,7 @@ define <vscale x 1 x float> @intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16(<vsca
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -485,7 +485,7 @@ define <vscale x 2 x float>  @intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -509,7 +509,7 @@ define <vscale x 2 x float> @intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16(<vsca
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -533,7 +533,7 @@ define <vscale x 4 x float>  @intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -557,7 +557,7 @@ define <vscale x 4 x float> @intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16(<vsca
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -581,7 +581,7 @@ define <vscale x 8 x float>  @intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16(<vscale x
 ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -605,7 +605,7 @@ define <vscale x 8 x float> @intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16(<vsca
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -631,7 +631,7 @@ define <vscale x 16 x float>  @intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16(<vscal
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v16, ft0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -657,7 +657,7 @@ define <vscale x 16 x float> @intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16(<v
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v16, ft0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -681,7 +681,7 @@ define <vscale x 1 x double>  @intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32(<vscale
 ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -705,7 +705,7 @@ define <vscale x 1 x double> @intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32(<vsc
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -729,7 +729,7 @@ define <vscale x 2 x double>  @intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32(<vscale
 ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -753,7 +753,7 @@ define <vscale x 2 x double> @intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32(<vsc
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -777,7 +777,7 @@ define <vscale x 4 x double>  @intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32(<vscale
 ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -801,7 +801,7 @@ define <vscale x 4 x double> @intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32(<vsc
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -827,7 +827,7 @@ define <vscale x 8 x double>  @intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32(<vscale
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v16, ft0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -853,7 +853,7 @@ define <vscale x 8 x double> @intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32(<vsc
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v16, ft0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll
index 80448534d1c1..b4d3d4086811 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f16(
 define <vscale x 1 x float> @intrinsic_vfwmul_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f16(
     <vscale x 1 x float> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f16(
 define <vscale x 2 x float> @intrinsic_vfwmul_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f16(
     <vscale x 2 x float> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f16(
 define <vscale x 4 x float> @intrinsic_vfwmul_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f16(
     <vscale x 4 x float> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f16(
 define <vscale x 8 x float> @intrinsic_vfwmul_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f16(
     <vscale x 8 x float> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f16(
 define <vscale x 16 x float> @intrinsic_vfwmul_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f16(
     <vscale x 16 x float> %0,
@@ -228,7 +228,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f16.f16(
 define <vscale x 1 x float> @intrinsic_vfwmul_mask_vf_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f16.f16(
     <vscale x 1 x float> %0,
@@ -268,7 +268,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f16.f16(
 define <vscale x 2 x float> @intrinsic_vfwmul_mask_vf_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f16.f16(
     <vscale x 2 x float> %0,
@@ -308,7 +308,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f16.f16(
 define <vscale x 4 x float> @intrinsic_vfwmul_mask_vf_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f16.f16(
     <vscale x 4 x float> %0,
@@ -348,7 +348,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f16.f16(
 define <vscale x 8 x float> @intrinsic_vfwmul_mask_vf_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f16.f16(
     <vscale x 8 x float> %0,
@@ -388,7 +388,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f16.f16(
 define <vscale x 16 x float> @intrinsic_vfwmul_mask_vf_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f16.f16(
     <vscale x 16 x float> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll
index 82f5b0fa957e..f84739475ee6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f16(
 define <vscale x 1 x float> @intrinsic_vfwmul_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f16(
     <vscale x 1 x float> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f16(
 define <vscale x 2 x float> @intrinsic_vfwmul_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f16(
     <vscale x 2 x float> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f16(
 define <vscale x 4 x float> @intrinsic_vfwmul_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f16(
     <vscale x 4 x float> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f16(
 define <vscale x 8 x float> @intrinsic_vfwmul_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f16(
     <vscale x 8 x float> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f16(
 define <vscale x 16 x float> @intrinsic_vfwmul_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f16(
     <vscale x 16 x float> %0,
@@ -228,7 +228,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f32(
 define <vscale x 1 x double> @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f32(
     <vscale x 1 x double> %0,
@@ -268,7 +268,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f32(
 define <vscale x 2 x double> @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f32(
     <vscale x 2 x double> %0,
@@ -308,7 +308,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f32(
 define <vscale x 4 x double> @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f32(
     <vscale x 4 x double> %0,
@@ -348,7 +348,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f32(
 define <vscale x 8 x double> @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f32(
     <vscale x 8 x double> %0,
@@ -388,7 +388,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f16.f16(
 define <vscale x 1 x float> @intrinsic_vfwmul_mask_vf_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f16.f16(
     <vscale x 1 x float> %0,
@@ -428,7 +428,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f16.f16(
 define <vscale x 2 x float> @intrinsic_vfwmul_mask_vf_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f16.f16(
     <vscale x 2 x float> %0,
@@ -468,7 +468,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f16.f16(
 define <vscale x 4 x float> @intrinsic_vfwmul_mask_vf_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f16.f16(
     <vscale x 4 x float> %0,
@@ -508,7 +508,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f16.f16(
 define <vscale x 8 x float> @intrinsic_vfwmul_mask_vf_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f16.f16(
     <vscale x 8 x float> %0,
@@ -548,7 +548,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f16.f16(
 define <vscale x 16 x float> @intrinsic_vfwmul_mask_vf_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f16.f16(
     <vscale x 16 x float> %0,
@@ -588,7 +588,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f32.f32(
 define <vscale x 1 x double> @intrinsic_vfwmul_mask_vf_nxv1f32_f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f32.f32(
     <vscale x 1 x double> %0,
@@ -628,7 +628,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f32.f32(
 define <vscale x 2 x double> @intrinsic_vfwmul_mask_vf_nxv2f32_f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f32.f32(
     <vscale x 2 x double> %0,
@@ -668,7 +668,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f32.f32(
 define <vscale x 4 x double> @intrinsic_vfwmul_mask_vf_nxv4f32_f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f32.f32(
     <vscale x 4 x double> %0,
@@ -708,7 +708,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f32.f32(
 define <vscale x 8 x double> @intrinsic_vfwmul_mask_vf_nxv8f32_f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f32.f32(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll
index 1b00a96f93d2..530f41b8d05a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16(
 define <vscale x 1 x float>  @intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16(
 define <vscale x 1 x float>  @intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16(
 define <vscale x 2 x float>  @intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16(
 define <vscale x 2 x float>  @intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16(
 define <vscale x 4 x float>  @intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v16, v18, v19
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16(
 define <vscale x 4 x float>  @intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v16, v18, v19, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16(
 define <vscale x 8 x float>  @intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v16, v20, v22
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16(
 define <vscale x 8 x float>  @intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v16, v20, v22, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -197,7 +197,7 @@ define <vscale x 16 x float>  @intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16(
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v16, v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -223,7 +223,7 @@ define <vscale x 16 x float>  @intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv1
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v16, v8, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -247,7 +247,7 @@ define <vscale x 1 x float>  @intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16(<vscale
 ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -271,7 +271,7 @@ define <vscale x 1 x float> @intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16(<vsc
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -295,7 +295,7 @@ define <vscale x 2 x float>  @intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16(<vscale
 ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -319,7 +319,7 @@ define <vscale x 2 x float> @intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16(<vsc
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -343,7 +343,7 @@ define <vscale x 4 x float>  @intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16(<vscale
 ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -367,7 +367,7 @@ define <vscale x 4 x float> @intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16(<vsc
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -391,7 +391,7 @@ define <vscale x 8 x float>  @intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16(<vscale
 ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -415,7 +415,7 @@ define <vscale x 8 x float> @intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16(<vsc
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -441,7 +441,7 @@ define <vscale x 16 x float>  @intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16(<vsca
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -467,7 +467,7 @@ define <vscale x 16 x float> @intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16(<
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll
index 66d284f1067d..b71161cfdbfa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16(
 define <vscale x 1 x float>  @intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16(
 define <vscale x 1 x float>  @intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16(
 define <vscale x 2 x float>  @intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16(
 define <vscale x 2 x float>  @intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16(
 define <vscale x 4 x float>  @intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v16, v18, v19
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16(
 define <vscale x 4 x float>  @intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v16, v18, v19, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16(
 define <vscale x 8 x float>  @intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v16, v20, v22
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16(
 define <vscale x 8 x float>  @intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v16, v20, v22, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -197,7 +197,7 @@ define <vscale x 16 x float>  @intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16(
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v16, v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -223,7 +223,7 @@ define <vscale x 16 x float>  @intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv1
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v16, v8, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -246,7 +246,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32(
 define <vscale x 1 x double>  @intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -269,7 +269,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32(
 define <vscale x 1 x double>  @intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -292,7 +292,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32(
 define <vscale x 2 x double>  @intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v16, v18, v19
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -315,7 +315,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32(
 define <vscale x 2 x double>  @intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v16, v18, v19, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -338,7 +338,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32(
 define <vscale x 4 x double>  @intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v16, v20, v22
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -361,7 +361,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32(
 define <vscale x 4 x double>  @intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v16, v20, v22, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -387,7 +387,7 @@ define <vscale x 8 x double>  @intrinsic_vfwnmacc_vv_nxv8f64_nxv8f32_nxv8f32(<vs
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v16, v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -413,7 +413,7 @@ define <vscale x 8 x double>  @intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f3
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v16, v8, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -437,7 +437,7 @@ define <vscale x 1 x float>  @intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16(<vscale
 ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -461,7 +461,7 @@ define <vscale x 1 x float> @intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16(<vsc
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -485,7 +485,7 @@ define <vscale x 2 x float>  @intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16(<vscale
 ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -509,7 +509,7 @@ define <vscale x 2 x float> @intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16(<vsc
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -533,7 +533,7 @@ define <vscale x 4 x float>  @intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16(<vscale
 ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -557,7 +557,7 @@ define <vscale x 4 x float> @intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16(<vsc
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -581,7 +581,7 @@ define <vscale x 8 x float>  @intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16(<vscale
 ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -605,7 +605,7 @@ define <vscale x 8 x float> @intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16(<vsc
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -631,7 +631,7 @@ define <vscale x 16 x float>  @intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16(<vsca
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -657,7 +657,7 @@ define <vscale x 16 x float> @intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16(<
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -681,7 +681,7 @@ define <vscale x 1 x double>  @intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32(<vscale
 ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -705,7 +705,7 @@ define <vscale x 1 x double> @intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32(<vs
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -729,7 +729,7 @@ define <vscale x 2 x double>  @intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32(<vscale
 ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -753,7 +753,7 @@ define <vscale x 2 x double> @intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32(<vs
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -777,7 +777,7 @@ define <vscale x 4 x double>  @intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32(<vscale
 ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -801,7 +801,7 @@ define <vscale x 4 x double> @intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32(<vs
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -827,7 +827,7 @@ define <vscale x 8 x double>  @intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32(<vscale
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -853,7 +853,7 @@ define <vscale x 8 x double> @intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32(<vs
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll
index 40bd6c0688da..784f112da5b2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16(
 define <vscale x 1 x float>  @intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16(
 define <vscale x 1 x float>  @intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16(
 define <vscale x 2 x float>  @intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16(
 define <vscale x 2 x float>  @intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16(
 define <vscale x 4 x float>  @intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v16, v18, v19
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16(
 define <vscale x 4 x float>  @intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v16, v18, v19, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16(
 define <vscale x 8 x float>  @intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v16, v20, v22
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16(
 define <vscale x 8 x float>  @intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v16, v20, v22, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -197,7 +197,7 @@ define <vscale x 16 x float>  @intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16(
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v16, v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -223,7 +223,7 @@ define <vscale x 16 x float>  @intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv1
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v16, v8, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -247,7 +247,7 @@ define <vscale x 1 x float>  @intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16(<vscale
 ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -271,7 +271,7 @@ define <vscale x 1 x float> @intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16(<vsc
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -295,7 +295,7 @@ define <vscale x 2 x float>  @intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16(<vscale
 ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -319,7 +319,7 @@ define <vscale x 2 x float> @intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16(<vsc
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -343,7 +343,7 @@ define <vscale x 4 x float>  @intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16(<vscale
 ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -367,7 +367,7 @@ define <vscale x 4 x float> @intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16(<vsc
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -391,7 +391,7 @@ define <vscale x 8 x float>  @intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16(<vscale
 ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -415,7 +415,7 @@ define <vscale x 8 x float> @intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16(<vsc
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -441,7 +441,7 @@ define <vscale x 16 x float>  @intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16(<vsca
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -467,7 +467,7 @@ define <vscale x 16 x float> @intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16(<
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll
index 95bcc7cbec90..fce7d8f0b94c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16(
 define <vscale x 1 x float>  @intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16(
 define <vscale x 1 x float>  @intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16(
 define <vscale x 2 x float>  @intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16(
 define <vscale x 2 x float>  @intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16(
 define <vscale x 4 x float>  @intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v16, v18, v19
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16(
 define <vscale x 4 x float>  @intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v16, v18, v19, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16(
 define <vscale x 8 x float>  @intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v16, v20, v22
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16(
 define <vscale x 8 x float>  @intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v16, v20, v22, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -197,7 +197,7 @@ define <vscale x 16 x float>  @intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16(
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v16, v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -223,7 +223,7 @@ define <vscale x 16 x float>  @intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv1
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v16, v8, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -246,7 +246,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32(
 define <vscale x 1 x double>  @intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -269,7 +269,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32(
 define <vscale x 1 x double>  @intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -292,7 +292,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32(
 define <vscale x 2 x double>  @intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v16, v18, v19
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -315,7 +315,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32(
 define <vscale x 2 x double>  @intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v16, v18, v19, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -338,7 +338,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32(
 define <vscale x 4 x double>  @intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v16, v20, v22
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -361,7 +361,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32(
 define <vscale x 4 x double>  @intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v16, v20, v22, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -387,7 +387,7 @@ define <vscale x 8 x double>  @intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32(<vs
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v16, v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -413,7 +413,7 @@ define <vscale x 8 x double>  @intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f3
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v16, v8, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -437,7 +437,7 @@ define <vscale x 1 x float>  @intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16(<vscale
 ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -461,7 +461,7 @@ define <vscale x 1 x float> @intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16(<vsc
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -485,7 +485,7 @@ define <vscale x 2 x float>  @intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16(<vscale
 ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -509,7 +509,7 @@ define <vscale x 2 x float> @intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16(<vsc
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -533,7 +533,7 @@ define <vscale x 4 x float>  @intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16(<vscale
 ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -557,7 +557,7 @@ define <vscale x 4 x float> @intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16(<vsc
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -581,7 +581,7 @@ define <vscale x 8 x float>  @intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16(<vscale
 ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -605,7 +605,7 @@ define <vscale x 8 x float> @intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16(<vsc
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -631,7 +631,7 @@ define <vscale x 16 x float>  @intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16(<vsca
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -657,7 +657,7 @@ define <vscale x 16 x float> @intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16(<
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -681,7 +681,7 @@ define <vscale x 1 x double>  @intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32(<vscale
 ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -705,7 +705,7 @@ define <vscale x 1 x double> @intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32(<vs
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -729,7 +729,7 @@ define <vscale x 2 x double>  @intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32(<vscale
 ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -753,7 +753,7 @@ define <vscale x 2 x double> @intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32(<vs
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -777,7 +777,7 @@ define <vscale x 4 x double>  @intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32(<vscale
 ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -801,7 +801,7 @@ define <vscale x 4 x double> @intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32(<vs
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -827,7 +827,7 @@ define <vscale x 8 x double>  @intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32(<vscale
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -853,7 +853,7 @@ define <vscale x 8 x double> @intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32(<vs
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll
index 90056c1a123a..e54016962967 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f16(
 define <vscale x 1 x float> @intrinsic_vfwsub_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f16(
     <vscale x 1 x float> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f16(
 define <vscale x 2 x float> @intrinsic_vfwsub_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f16(
     <vscale x 2 x float> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f16(
 define <vscale x 4 x float> @intrinsic_vfwsub_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f16(
     <vscale x 4 x float> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f16(
 define <vscale x 8 x float> @intrinsic_vfwsub_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f16(
     <vscale x 8 x float> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f16(
 define <vscale x 16 x float> @intrinsic_vfwsub_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f16(
     <vscale x 16 x float> %0,
@@ -228,7 +228,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f16.f16(
 define <vscale x 1 x float> @intrinsic_vfwsub_mask_vf_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f16.f16(
     <vscale x 1 x float> %0,
@@ -268,7 +268,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f16.f16(
 define <vscale x 2 x float> @intrinsic_vfwsub_mask_vf_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f16.f16(
     <vscale x 2 x float> %0,
@@ -308,7 +308,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f16.f16(
 define <vscale x 4 x float> @intrinsic_vfwsub_mask_vf_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f16.f16(
     <vscale x 4 x float> %0,
@@ -348,7 +348,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f16.f16(
 define <vscale x 8 x float> @intrinsic_vfwsub_mask_vf_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f16.f16(
     <vscale x 8 x float> %0,
@@ -388,7 +388,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f16.f16(
 define <vscale x 16 x float> @intrinsic_vfwsub_mask_vf_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f16.f16(
     <vscale x 16 x float> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll
index d0a5c1182fae..9226f3216532 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f16(
 define <vscale x 1 x float> @intrinsic_vfwsub_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f16(
     <vscale x 1 x float> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f16(
 define <vscale x 2 x float> @intrinsic_vfwsub_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f16(
     <vscale x 2 x float> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f16(
 define <vscale x 4 x float> @intrinsic_vfwsub_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f16(
     <vscale x 4 x float> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f16(
 define <vscale x 8 x float> @intrinsic_vfwsub_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f16(
     <vscale x 8 x float> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f16(
 define <vscale x 16 x float> @intrinsic_vfwsub_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f16(
     <vscale x 16 x float> %0,
@@ -228,7 +228,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f32(
 define <vscale x 1 x double> @intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f32(
     <vscale x 1 x double> %0,
@@ -268,7 +268,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f32(
 define <vscale x 2 x double> @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f32(
     <vscale x 2 x double> %0,
@@ -308,7 +308,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f32(
 define <vscale x 4 x double> @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f32(
     <vscale x 4 x double> %0,
@@ -348,7 +348,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f32(
 define <vscale x 8 x double> @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f32(
     <vscale x 8 x double> %0,
@@ -388,7 +388,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f16.f16(
 define <vscale x 1 x float> @intrinsic_vfwsub_mask_vf_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f16.f16(
     <vscale x 1 x float> %0,
@@ -428,7 +428,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f16.f16(
 define <vscale x 2 x float> @intrinsic_vfwsub_mask_vf_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f16.f16(
     <vscale x 2 x float> %0,
@@ -468,7 +468,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f16.f16(
 define <vscale x 4 x float> @intrinsic_vfwsub_mask_vf_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f16.f16(
     <vscale x 4 x float> %0,
@@ -508,7 +508,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f16.f16(
 define <vscale x 8 x float> @intrinsic_vfwsub_mask_vf_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f16.f16(
     <vscale x 8 x float> %0,
@@ -548,7 +548,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f16.f16(
 define <vscale x 16 x float> @intrinsic_vfwsub_mask_vf_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f16.f16(
     <vscale x 16 x float> %0,
@@ -588,7 +588,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f32.f32(
 define <vscale x 1 x double> @intrinsic_vfwsub_mask_vf_nxv1f32_f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f32.f32(
     <vscale x 1 x double> %0,
@@ -628,7 +628,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f32.f32(
 define <vscale x 2 x double> @intrinsic_vfwsub_mask_vf_nxv2f32_f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f32.f32(
     <vscale x 2 x double> %0,
@@ -668,7 +668,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f32.f32(
 define <vscale x 4 x double> @intrinsic_vfwsub_mask_vf_nxv4f32_f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f32.f32(
     <vscale x 4 x double> %0,
@@ -708,7 +708,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f32.f32(
 define <vscale x 8 x double> @intrinsic_vfwsub_mask_vf_nxv8f32_f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f32.f32(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll
index 2cf5dd6bc439..d50d1687a352 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f16(
 define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f16(
     <vscale x 1 x float> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f16(
 define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f16(
     <vscale x 2 x float> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f16(
 define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f16(
     <vscale x 4 x float> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f16(
 define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f16(
     <vscale x 8 x float> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f16(
 define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f16(
     <vscale x 16 x float> %0,
@@ -228,7 +228,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.f16(
 define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wf_nxv1f32_f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -268,7 +268,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.f16(
 define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wf_nxv2f32_f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -308,7 +308,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.f16(
 define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wf_nxv4f32_f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -348,7 +348,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.f16(
 define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wf_nxv8f32_f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -388,7 +388,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.f16(
 define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wf_nxv16f32_f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.f16(
     <vscale x 16 x float> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
index 1755ec6bf3c2..43245ff0a18b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f16(
 define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f16(
     <vscale x 1 x float> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f16(
 define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f16(
     <vscale x 2 x float> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f16(
 define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f16(
     <vscale x 4 x float> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f16(
 define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f16(
     <vscale x 8 x float> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f16(
 define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f16(
     <vscale x 16 x float> %0,
@@ -228,7 +228,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f32(
 define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f32(
     <vscale x 1 x double> %0,
@@ -268,7 +268,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f32(
 define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f32(
     <vscale x 2 x double> %0,
@@ -308,7 +308,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f32(
 define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f32(
     <vscale x 4 x double> %0,
@@ -348,7 +348,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f32(
 define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f32(
     <vscale x 8 x double> %0,
@@ -388,7 +388,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.f16(
 define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wf_nxv1f32_f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -428,7 +428,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.f16(
 define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wf_nxv2f32_f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -468,7 +468,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.f16(
 define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wf_nxv4f32_f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -508,7 +508,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.f16(
 define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wf_nxv8f32_f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -548,7 +548,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.f16(
 define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wf_nxv16f32_f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -588,7 +588,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32(
 define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wf_nxv1f64_f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f64_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -628,7 +628,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.f32(
 define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wf_nxv2f64_f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f64_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -668,7 +668,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.f32(
 define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wf_nxv4f64_f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f64_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -708,7 +708,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32(
 define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wf_nxv8f64_f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f64_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll
index 9e2d395a44ec..56399a39d449 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll
@@ -22,7 +22,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vid_mask_v_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8
-; CHECK:       vsetvli {{.*}}, a0, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e8,mf8,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -54,7 +54,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vid_mask_v_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i8
-; CHECK:       vsetvli {{.*}}, a0, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e8,mf4,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -86,7 +86,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vid_mask_v_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i8
-; CHECK:       vsetvli {{.*}}, a0, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e8,mf2,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -118,7 +118,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vid_mask_v_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i8
-; CHECK:       vsetvli {{.*}}, a0, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e8,m1,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vid_mask_v_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i8
-; CHECK:       vsetvli {{.*}}, a0, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e8,m2,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -182,7 +182,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vid_mask_v_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i8
-; CHECK:       vsetvli {{.*}}, a0, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e8,m4,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -214,7 +214,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vid_mask_v_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i16
-; CHECK:       vsetvli {{.*}}, a0, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e16,mf4,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -246,7 +246,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vid_mask_v_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i16
-; CHECK:       vsetvli {{.*}}, a0, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e16,mf2,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -278,7 +278,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vid_mask_v_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i16
-; CHECK:       vsetvli {{.*}}, a0, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e16,m1,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -310,7 +310,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vid_mask_v_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i16
-; CHECK:       vsetvli {{.*}}, a0, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e16,m2,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -342,7 +342,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vid_mask_v_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i16
-; CHECK:       vsetvli {{.*}}, a0, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e16,m4,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -374,7 +374,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vid_mask_v_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i16
-; CHECK:       vsetvli {{.*}}, a0, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e16,m8,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -406,7 +406,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vid_mask_v_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i32
-; CHECK:       vsetvli {{.*}}, a0, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e32,mf2,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -438,7 +438,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vid_mask_v_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i32
-; CHECK:       vsetvli {{.*}}, a0, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e32,m1,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -470,7 +470,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vid_mask_v_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i32
-; CHECK:       vsetvli {{.*}}, a0, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e32,m2,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -502,7 +502,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vid_mask_v_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i32
-; CHECK:       vsetvli {{.*}}, a0, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e32,m4,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -534,7 +534,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vid_mask_v_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i32
-; CHECK:       vsetvli {{.*}}, a0, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e32,m8,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll
index 53aa6aa02104..77f19f9bc307 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll
@@ -22,7 +22,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vid_mask_v_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8
-; CHECK:       vsetvli {{.*}}, a0, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e8,mf8,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -54,7 +54,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vid_mask_v_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i8
-; CHECK:       vsetvli {{.*}}, a0, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e8,mf4,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -86,7 +86,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vid_mask_v_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i8
-; CHECK:       vsetvli {{.*}}, a0, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e8,mf2,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -118,7 +118,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vid_mask_v_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i8
-; CHECK:       vsetvli {{.*}}, a0, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e8,m1,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vid_mask_v_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i8
-; CHECK:       vsetvli {{.*}}, a0, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e8,m2,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -182,7 +182,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vid_mask_v_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i8
-; CHECK:       vsetvli {{.*}}, a0, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e8,m4,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -214,7 +214,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vid_mask_v_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i16
-; CHECK:       vsetvli {{.*}}, a0, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e16,mf4,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -246,7 +246,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vid_mask_v_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i16
-; CHECK:       vsetvli {{.*}}, a0, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e16,mf2,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -278,7 +278,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vid_mask_v_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i16
-; CHECK:       vsetvli {{.*}}, a0, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e16,m1,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -310,7 +310,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vid_mask_v_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i16
-; CHECK:       vsetvli {{.*}}, a0, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e16,m2,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -342,7 +342,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vid_mask_v_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i16
-; CHECK:       vsetvli {{.*}}, a0, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e16,m4,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -374,7 +374,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vid_mask_v_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i16
-; CHECK:       vsetvli {{.*}}, a0, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e16,m8,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -406,7 +406,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vid_mask_v_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i32
-; CHECK:       vsetvli {{.*}}, a0, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e32,mf2,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -438,7 +438,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vid_mask_v_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i32
-; CHECK:       vsetvli {{.*}}, a0, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e32,m1,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -470,7 +470,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vid_mask_v_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i32
-; CHECK:       vsetvli {{.*}}, a0, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e32,m2,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -502,7 +502,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vid_mask_v_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i32
-; CHECK:       vsetvli {{.*}}, a0, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e32,m4,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -534,7 +534,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vid_mask_v_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i32
-; CHECK:       vsetvli {{.*}}, a0, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e32,m8,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -566,7 +566,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vid_mask_v_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i64
-; CHECK:       vsetvli {{.*}}, a0, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e64,m1,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -598,7 +598,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vid_mask_v_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i64
-; CHECK:       vsetvli {{.*}}, a0, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e64,m2,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -630,7 +630,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vid_mask_v_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i64
-; CHECK:       vsetvli {{.*}}, a0, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e64,m4,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -662,7 +662,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vid_mask_v_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i64
-; CHECK:       vsetvli {{.*}}, a0, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, a0, e64,m8,tu,mu
 ; CHECK:       vid.v {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll
index bc1b1c56bb63..5c663626194f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_viota_mask_m_nxv2i8_nxv2i1(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i8_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_viota_mask_m_nxv4i8_nxv4i1(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i8_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_viota_mask_m_nxv8i8_nxv8i1(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i8_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_viota_mask_m_nxv16i8_nxv16i1(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i8_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_viota_mask_m_nxv32i8_nxv32i1(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i8_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_viota_mask_m_nxv64i8_nxv64i1(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv64i8_nxv64i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_viota_mask_m_nxv1i16_nxv1i1(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i16_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_viota_mask_m_nxv2i16_nxv2i1(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i16_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_viota_mask_m_nxv4i16_nxv4i1(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i16_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_viota_mask_m_nxv8i16_nxv8i1(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i16_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_viota_mask_m_nxv16i16_nxv16i1(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i16_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_viota_mask_m_nxv32i16_nxv32i1(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i16_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_viota_mask_m_nxv1i32_nxv1i1(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i32_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_viota_mask_m_nxv2i32_nxv2i1(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i32_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_viota_mask_m_nxv4i32_nxv4i1(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i32_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_viota_mask_m_nxv8i32_nxv8i1(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i32_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_viota_mask_m_nxv16i32_nxv16i1(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i32_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll
index 7a2783c29a1e..249419989a3d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_viota_mask_m_nxv2i8_nxv2i1(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i8_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_viota_mask_m_nxv4i8_nxv4i1(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i8_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_viota_mask_m_nxv8i8_nxv8i1(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i8_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_viota_mask_m_nxv16i8_nxv16i1(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i8_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_viota_mask_m_nxv32i8_nxv32i1(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i8_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_viota_mask_m_nxv64i8_nxv64i1(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv64i8_nxv64i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_viota_mask_m_nxv1i16_nxv1i1(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i16_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_viota_mask_m_nxv2i16_nxv2i1(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i16_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_viota_mask_m_nxv4i16_nxv4i1(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i16_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_viota_mask_m_nxv8i16_nxv8i1(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i16_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_viota_mask_m_nxv16i16_nxv16i1(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i16_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_viota_mask_m_nxv32i16_nxv32i1(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i16_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_viota_mask_m_nxv1i32_nxv1i1(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i32_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_viota_mask_m_nxv2i32_nxv2i1(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i32_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_viota_mask_m_nxv4i32_nxv4i1(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i32_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_viota_mask_m_nxv8i32_nxv8i1(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i32_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_viota_mask_m_nxv16i32_nxv16i1(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i32_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_viota_mask_m_nxv1i64_nxv1i1(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i64_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_viota_mask_m_nxv2i64_nxv2i1(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i64_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_viota_mask_m_nxv4i64_nxv4i1(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i64_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_viota_mask_m_nxv8i64_nxv8i1(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i64_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    viota.m v16, v0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll
index 32e9004f9fb0..f00edb1bb12e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll
@@ -26,7 +26,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vle_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vle32.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -62,7 +62,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vle_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vle32.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -98,7 +98,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vle.mask.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vle_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vle32.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vle.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -134,7 +134,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vle.mask.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vle_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vle32.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vle.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -170,7 +170,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vle.mask.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vle_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vle32.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vle.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -206,7 +206,7 @@ declare <vscale x 1 x float> @llvm.riscv.vle.mask.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vle_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vle32.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vle.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -242,7 +242,7 @@ declare <vscale x 2 x float> @llvm.riscv.vle.mask.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vle_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vle32.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vle.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -278,7 +278,7 @@ declare <vscale x 4 x float> @llvm.riscv.vle.mask.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vle_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vle32.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vle.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -314,7 +314,7 @@ declare <vscale x 8 x float> @llvm.riscv.vle.mask.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vle_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vle32.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vle.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -350,7 +350,7 @@ declare <vscale x 16 x float> @llvm.riscv.vle.mask.nxv16f32(
 define <vscale x 16 x float> @intrinsic_vle_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vle32.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vle.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -386,7 +386,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vle.mask.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vle_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vle16.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vle.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -422,7 +422,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vle.mask.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vle_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vle16.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vle.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -458,7 +458,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vle.mask.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vle_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vle16.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vle.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -494,7 +494,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vle.mask.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vle_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vle16.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vle.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -530,7 +530,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vle.mask.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vle_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vle16.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vle.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -566,7 +566,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vle.mask.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vle_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vle16.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vle.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -602,7 +602,7 @@ declare <vscale x 1 x half> @llvm.riscv.vle.mask.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vle_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vle16.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vle.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -638,7 +638,7 @@ declare <vscale x 2 x half> @llvm.riscv.vle.mask.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vle_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vle16.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vle.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -674,7 +674,7 @@ declare <vscale x 4 x half> @llvm.riscv.vle.mask.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vle_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vle16.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vle.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -710,7 +710,7 @@ declare <vscale x 8 x half> @llvm.riscv.vle.mask.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vle_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vle16.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vle.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -746,7 +746,7 @@ declare <vscale x 16 x half> @llvm.riscv.vle.mask.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vle_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vle16.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vle.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -782,7 +782,7 @@ declare <vscale x 32 x half> @llvm.riscv.vle.mask.nxv32f16(
 define <vscale x 32 x half> @intrinsic_vle_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vle16.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vle.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -818,7 +818,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vle_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vle8.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -854,7 +854,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vle.mask.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vle_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vle8.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vle.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -890,7 +890,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vle.mask.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vle_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vle8.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vle.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -926,7 +926,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vle.mask.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vle_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vle8.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vle.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -962,7 +962,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vle.mask.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vle_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vle8.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vle.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -998,7 +998,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vle.mask.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vle_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vle8.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vle.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1034,7 +1034,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vle.mask.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vle_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vle8.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vle.mask.nxv64i8(
     <vscale x 64 x i8> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll
index 9f69a245d5c0..74eaa133e301 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll
@@ -26,7 +26,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vle_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vle64.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -62,7 +62,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vle.mask.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vle_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vle64.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vle.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -98,7 +98,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vle.mask.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vle_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vle64.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vle.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -134,7 +134,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vle.mask.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vle_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vle64.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vle.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -170,7 +170,7 @@ declare <vscale x 1 x double> @llvm.riscv.vle.mask.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vle_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f64_nxv1f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vle64.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vle.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -206,7 +206,7 @@ declare <vscale x 2 x double> @llvm.riscv.vle.mask.nxv2f64(
 define <vscale x 2 x double> @intrinsic_vle_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f64_nxv2f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vle64.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vle.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -242,7 +242,7 @@ declare <vscale x 4 x double> @llvm.riscv.vle.mask.nxv4f64(
 define <vscale x 4 x double> @intrinsic_vle_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f64_nxv4f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vle64.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vle.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -278,7 +278,7 @@ declare <vscale x 8 x double> @llvm.riscv.vle.mask.nxv8f64(
 define <vscale x 8 x double> @intrinsic_vle_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f64_nxv8f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vle64.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vle.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -314,7 +314,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vle_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vle32.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -350,7 +350,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vle_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vle32.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -386,7 +386,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vle.mask.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vle_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vle32.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vle.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -422,7 +422,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vle.mask.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vle_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vle32.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vle.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -458,7 +458,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vle.mask.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vle_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vle32.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vle.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -494,7 +494,7 @@ declare <vscale x 1 x float> @llvm.riscv.vle.mask.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vle_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vle32.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vle.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -530,7 +530,7 @@ declare <vscale x 2 x float> @llvm.riscv.vle.mask.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vle_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vle32.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vle.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -566,7 +566,7 @@ declare <vscale x 4 x float> @llvm.riscv.vle.mask.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vle_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vle32.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vle.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -602,7 +602,7 @@ declare <vscale x 8 x float> @llvm.riscv.vle.mask.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vle_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vle32.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vle.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -638,7 +638,7 @@ declare <vscale x 16 x float> @llvm.riscv.vle.mask.nxv16f32(
 define <vscale x 16 x float> @intrinsic_vle_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vle32.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vle.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -674,7 +674,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vle.mask.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vle_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vle16.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vle.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -710,7 +710,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vle.mask.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vle_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vle16.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vle.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -746,7 +746,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vle.mask.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vle_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vle16.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vle.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -782,7 +782,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vle.mask.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vle_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vle16.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vle.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -818,7 +818,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vle.mask.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vle_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vle16.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vle.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -854,7 +854,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vle.mask.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vle_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vle16.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vle.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -890,7 +890,7 @@ declare <vscale x 1 x half> @llvm.riscv.vle.mask.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vle_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vle16.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vle.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -926,7 +926,7 @@ declare <vscale x 2 x half> @llvm.riscv.vle.mask.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vle_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vle16.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vle.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -962,7 +962,7 @@ declare <vscale x 4 x half> @llvm.riscv.vle.mask.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vle_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vle16.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vle.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -998,7 +998,7 @@ declare <vscale x 8 x half> @llvm.riscv.vle.mask.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vle_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vle16.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vle.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1034,7 +1034,7 @@ declare <vscale x 16 x half> @llvm.riscv.vle.mask.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vle_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vle16.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vle.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1070,7 +1070,7 @@ declare <vscale x 32 x half> @llvm.riscv.vle.mask.nxv32f16(
 define <vscale x 32 x half> @intrinsic_vle_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vle16.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vle.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1106,7 +1106,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vle_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vle8.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1142,7 +1142,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vle.mask.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vle_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vle8.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vle.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1178,7 +1178,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vle.mask.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vle_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vle8.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vle.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1214,7 +1214,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vle.mask.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vle_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vle8.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vle.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1250,7 +1250,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vle.mask.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vle_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vle8.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vle.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1286,7 +1286,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vle.mask.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vle_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vle8.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vle.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1322,7 +1322,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vle.mask.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vle_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vle8.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vle.mask.nxv64i8(
     <vscale x 64 x i8> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll
index ea882a5bf587..04987997b8e5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll
@@ -25,7 +25,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vleff.mask.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vleff_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vle32ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vleff.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -61,7 +61,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vleff.mask.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vleff_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vle32ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vleff.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -97,7 +97,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vleff.mask.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vleff_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vle32ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vleff.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -133,7 +133,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vleff.mask.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vleff_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vle32ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vleff.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -169,7 +169,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vleff.mask.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vleff_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vle32ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vleff.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -205,7 +205,7 @@ declare <vscale x 1 x float> @llvm.riscv.vleff.mask.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vleff_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vle32ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vleff.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -241,7 +241,7 @@ declare <vscale x 2 x float> @llvm.riscv.vleff.mask.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vleff_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vle32ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vleff.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -277,7 +277,7 @@ declare <vscale x 4 x float> @llvm.riscv.vleff.mask.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vleff_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vle32ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vleff.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -313,7 +313,7 @@ declare <vscale x 8 x float> @llvm.riscv.vleff.mask.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vleff_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vle32ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vleff.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -349,7 +349,7 @@ declare <vscale x 16 x float> @llvm.riscv.vleff.mask.nxv16f32(
 define <vscale x 16 x float> @intrinsic_vleff_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vle32ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vleff.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -385,7 +385,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vleff.mask.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vleff_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vle16ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vleff.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -421,7 +421,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vleff.mask.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vleff_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vle16ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vleff.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -457,7 +457,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vleff.mask.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vleff_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vle16ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vleff.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -493,7 +493,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vleff.mask.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vleff_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vle16ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vleff.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -529,7 +529,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vleff.mask.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vleff_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vle16ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vleff.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -565,7 +565,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vleff.mask.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vleff_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vle16ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vleff.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -601,7 +601,7 @@ declare <vscale x 1 x half> @llvm.riscv.vleff.mask.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vleff_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vle16ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vleff.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -637,7 +637,7 @@ declare <vscale x 2 x half> @llvm.riscv.vleff.mask.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vleff_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vle16ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vleff.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -673,7 +673,7 @@ declare <vscale x 4 x half> @llvm.riscv.vleff.mask.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vleff_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vle16ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vleff.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@ declare <vscale x 8 x half> @llvm.riscv.vleff.mask.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vleff_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vle16ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vleff.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -745,7 +745,7 @@ declare <vscale x 16 x half> @llvm.riscv.vleff.mask.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vleff_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vle16ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vleff.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@ declare <vscale x 32 x half> @llvm.riscv.vleff.mask.nxv32f16(
 define <vscale x 32 x half> @intrinsic_vleff_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vle16ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vleff.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -817,7 +817,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vleff.mask.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vleff_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vle8ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vleff.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -853,7 +853,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vleff.mask.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vleff_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vle8ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vleff.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -889,7 +889,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vleff.mask.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vleff_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vle8ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vleff.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -925,7 +925,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vleff.mask.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vleff_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vle8ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vleff.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -961,7 +961,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vleff.mask.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vleff_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vle8ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vleff.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -997,7 +997,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vleff.mask.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vleff_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vle8ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vleff.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1033,7 +1033,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vleff.mask.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vleff_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vle8ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vleff.mask.nxv64i8(
     <vscale x 64 x i8> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll
index 560221c2536b..17c3cd5d1f8b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll
@@ -25,7 +25,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vleff.mask.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vle64ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vleff.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -61,7 +61,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vleff.mask.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vleff_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vle64ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vleff.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -97,7 +97,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vleff.mask.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vleff_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vle64ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vleff.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -133,7 +133,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vleff.mask.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vleff_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vle64ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vleff.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -169,7 +169,7 @@ declare <vscale x 1 x double> @llvm.riscv.vleff.mask.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vleff_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f64_nxv1f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vle64ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vleff.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -205,7 +205,7 @@ declare <vscale x 2 x double> @llvm.riscv.vleff.mask.nxv2f64(
 define <vscale x 2 x double> @intrinsic_vleff_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f64_nxv2f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vle64ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vleff.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -241,7 +241,7 @@ declare <vscale x 4 x double> @llvm.riscv.vleff.mask.nxv4f64(
 define <vscale x 4 x double> @intrinsic_vleff_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f64_nxv4f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vle64ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vleff.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -277,7 +277,7 @@ declare <vscale x 8 x double> @llvm.riscv.vleff.mask.nxv8f64(
 define <vscale x 8 x double> @intrinsic_vleff_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f64_nxv8f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vle64ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vleff.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -313,7 +313,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vleff.mask.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vleff_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vle32ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vleff.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -349,7 +349,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vleff.mask.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vleff_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vle32ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vleff.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -385,7 +385,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vleff.mask.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vleff_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vle32ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vleff.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -421,7 +421,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vleff.mask.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vleff_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vle32ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vleff.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -457,7 +457,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vleff.mask.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vleff_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vle32ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vleff.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -493,7 +493,7 @@ declare <vscale x 1 x float> @llvm.riscv.vleff.mask.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vleff_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vle32ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vleff.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -529,7 +529,7 @@ declare <vscale x 2 x float> @llvm.riscv.vleff.mask.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vleff_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vle32ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vleff.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -565,7 +565,7 @@ declare <vscale x 4 x float> @llvm.riscv.vleff.mask.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vleff_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vle32ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vleff.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -601,7 +601,7 @@ declare <vscale x 8 x float> @llvm.riscv.vleff.mask.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vleff_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vle32ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vleff.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -637,7 +637,7 @@ declare <vscale x 16 x float> @llvm.riscv.vleff.mask.nxv16f32(
 define <vscale x 16 x float> @intrinsic_vleff_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vle32ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vleff.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -673,7 +673,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vleff.mask.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vleff_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vle16ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vleff.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -709,7 +709,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vleff.mask.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vleff_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vle16ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vleff.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -745,7 +745,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vleff.mask.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vleff_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vle16ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vleff.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -781,7 +781,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vleff.mask.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vleff_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vle16ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vleff.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -817,7 +817,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vleff.mask.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vleff_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vle16ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vleff.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -853,7 +853,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vleff.mask.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vleff_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vle16ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vleff.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -889,7 +889,7 @@ declare <vscale x 1 x half> @llvm.riscv.vleff.mask.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vleff_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vle16ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vleff.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -925,7 +925,7 @@ declare <vscale x 2 x half> @llvm.riscv.vleff.mask.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vleff_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vle16ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vleff.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -961,7 +961,7 @@ declare <vscale x 4 x half> @llvm.riscv.vleff.mask.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vleff_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vle16ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vleff.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -997,7 +997,7 @@ declare <vscale x 8 x half> @llvm.riscv.vleff.mask.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vleff_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vle16ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vleff.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1033,7 +1033,7 @@ declare <vscale x 16 x half> @llvm.riscv.vleff.mask.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vleff_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vle16ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vleff.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1069,7 +1069,7 @@ declare <vscale x 32 x half> @llvm.riscv.vleff.mask.nxv32f16(
 define <vscale x 32 x half> @intrinsic_vleff_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vle16ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vleff.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1105,7 +1105,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vleff.mask.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vleff_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vle8ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vleff.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1141,7 +1141,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vleff.mask.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vleff_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vle8ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vleff.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1177,7 +1177,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vleff.mask.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vleff_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vle8ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vleff.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1213,7 +1213,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vleff.mask.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vleff_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vle8ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vleff.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1249,7 +1249,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vleff.mask.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vleff_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vle8ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vleff.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1285,7 +1285,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vleff.mask.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vleff_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vle8ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vleff.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1321,7 +1321,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vleff.mask.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vleff_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vle8ff.v {{v[0-9]+}}, (a0), v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vleff.mask.nxv64i8(
     <vscale x 64 x i8> %0,

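Every vleff hunk above follows the same shape: the masked fault-only-first load's first operand is the passthru vector tied to the destination, and the vsetvli feeding it now requests tail-undisturbed (tu) instead of tail-agnostic (ta), so tail elements of the result keep the passthru's values. As a minimal sketch of the operand roles, reassembled from the define signatures shown above (the trailing arguments are truncated in the hunks themselves):

  ; With tu,mu in the preceding vsetvli, tail elements of %a keep the
  ; values from the passthru operand %0 rather than being agnostic.
  %a = call <vscale x 4 x i16> @llvm.riscv.vleff.mask.nxv4i16(
      <vscale x 4 x i16> %0,  ; passthru, tied to the destination register
      <vscale x 4 x i16>* %1, ; base address of the load
      <vscale x 4 x i1> %2,   ; mask register v0
      i64 %3)                 ; vl, the active vector length
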
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll
index 9c1032d53243..ecdd0450e73f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vlse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, a2, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e32,mf2,tu,mu
 ; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vlse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, a2, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e32,m1,tu,mu
 ; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vlse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, a2, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e32,m2,tu,mu
 ; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vlse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, a2, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e32,m4,tu,mu
 ; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vlse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, a2, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e32,m8,tu,mu
 ; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -228,7 +228,7 @@ declare <vscale x 1 x float> @llvm.riscv.vlse.mask.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vlse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, a2, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e32,mf2,tu,mu
 ; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vlse.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -268,7 +268,7 @@ declare <vscale x 2 x float> @llvm.riscv.vlse.mask.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vlse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, a2, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e32,m1,tu,mu
 ; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vlse.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -308,7 +308,7 @@ declare <vscale x 4 x float> @llvm.riscv.vlse.mask.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vlse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, a2, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e32,m2,tu,mu
 ; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vlse.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -348,7 +348,7 @@ declare <vscale x 8 x float> @llvm.riscv.vlse.mask.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vlse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, a2, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e32,m4,tu,mu
 ; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vlse.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -388,7 +388,7 @@ declare <vscale x 16 x float> @llvm.riscv.vlse.mask.nxv16f32(
 define <vscale x 16 x float> @intrinsic_vlse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, a2, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e32,m8,tu,mu
 ; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vlse.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -428,7 +428,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vlse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, a2, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e16,mf4,tu,mu
 ; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vlse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, a2, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e16,mf2,tu,mu
 ; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vlse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, a2, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e16,m1,tu,mu
 ; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vlse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, a2, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e16,m2,tu,mu
 ; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -588,7 +588,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vlse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, a2, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e16,m4,tu,mu
 ; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -628,7 +628,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vlse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, a2, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e16,m8,tu,mu
 ; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -668,7 +668,7 @@ declare <vscale x 1 x half> @llvm.riscv.vlse.mask.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vlse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, a2, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e16,mf4,tu,mu
 ; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vlse.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -708,7 +708,7 @@ declare <vscale x 2 x half> @llvm.riscv.vlse.mask.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vlse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, a2, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e16,mf2,tu,mu
 ; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vlse.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -748,7 +748,7 @@ declare <vscale x 4 x half> @llvm.riscv.vlse.mask.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vlse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, a2, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e16,m1,tu,mu
 ; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vlse.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -788,7 +788,7 @@ declare <vscale x 8 x half> @llvm.riscv.vlse.mask.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vlse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, a2, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e16,m2,tu,mu
 ; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vlse.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -828,7 +828,7 @@ declare <vscale x 16 x half> @llvm.riscv.vlse.mask.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vlse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, a2, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e16,m4,tu,mu
 ; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vlse.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -868,7 +868,7 @@ declare <vscale x 32 x half> @llvm.riscv.vlse.mask.nxv32f16(
 define <vscale x 32 x half> @intrinsic_vlse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, a2, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e16,m8,tu,mu
 ; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vlse.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vlse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, a2, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e8,mf8,tu,mu
 ; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vlse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, a2, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e8,mf4,tu,mu
 ; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vlse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, a2, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e8,mf2,tu,mu
 ; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vlse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, a2, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e8,m1,tu,mu
 ; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vlse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, a2, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e8,m2,tu,mu
 ; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vlse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, a2, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e8,m4,tu,mu
 ; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vlse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, i32 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, a2, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e8,m8,tu,mu
 ; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8(
     <vscale x 64 x i8> %0,

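The strided-load (vlse) tests change identically, and the rv64 file below differs only in using i64 for the scalar stride and vl operands. Sketching the masked form's operands from the signatures above (again reassembled, since each hunk truncates the argument list):

  ; tu,mu keeps tail elements of %a equal to the passthru %0.
  %a = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32(
      <vscale x 1 x i32> %0,  ; passthru, tied to the destination register
      <vscale x 1 x i32>* %1, ; base address
      i32 %2,                 ; byte stride between elements
      <vscale x 1 x i1> %3,   ; mask register v0
      i32 %4)                 ; vl, the active vector length
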
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll
index 87b4c5f97914..8ef166f7f4c8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, a2, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e64,m1,tu,mu
 ; CHECK:       vlse64.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vlse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, a2, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e64,m2,tu,mu
 ; CHECK:       vlse64.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vlse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, a2, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e64,m4,tu,mu
 ; CHECK:       vlse64.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vlse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, a2, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e64,m8,tu,mu
 ; CHECK:       vlse64.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -188,7 +188,7 @@ declare <vscale x 1 x double> @llvm.riscv.vlse.mask.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vlse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f64_nxv1f64
-; CHECK:       vsetvli {{.*}}, a2, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e64,m1,tu,mu
 ; CHECK:       vlse64.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vlse.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -228,7 +228,7 @@ declare <vscale x 2 x double> @llvm.riscv.vlse.mask.nxv2f64(
 define <vscale x 2 x double> @intrinsic_vlse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f64_nxv2f64
-; CHECK:       vsetvli {{.*}}, a2, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e64,m2,tu,mu
 ; CHECK:       vlse64.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vlse.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -268,7 +268,7 @@ declare <vscale x 4 x double> @llvm.riscv.vlse.mask.nxv4f64(
 define <vscale x 4 x double> @intrinsic_vlse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f64_nxv4f64
-; CHECK:       vsetvli {{.*}}, a2, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e64,m4,tu,mu
 ; CHECK:       vlse64.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vlse.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -308,7 +308,7 @@ declare <vscale x 8 x double> @llvm.riscv.vlse.mask.nxv8f64(
 define <vscale x 8 x double> @intrinsic_vlse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f64_nxv8f64
-; CHECK:       vsetvli {{.*}}, a2, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e64,m8,tu,mu
 ; CHECK:       vlse64.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vlse.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -348,7 +348,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vlse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, a2, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e32,mf2,tu,mu
 ; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -388,7 +388,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vlse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, a2, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e32,m1,tu,mu
 ; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -428,7 +428,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vlse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, a2, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e32,m2,tu,mu
 ; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -468,7 +468,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vlse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, a2, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e32,m4,tu,mu
 ; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -508,7 +508,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vlse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, a2, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e32,m8,tu,mu
 ; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x float> @llvm.riscv.vlse.mask.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vlse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, a2, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e32,mf2,tu,mu
 ; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vlse.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x float> @llvm.riscv.vlse.mask.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vlse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, a2, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e32,m1,tu,mu
 ; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vlse.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x float> @llvm.riscv.vlse.mask.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vlse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, a2, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e32,m2,tu,mu
 ; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vlse.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x float> @llvm.riscv.vlse.mask.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vlse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, a2, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e32,m4,tu,mu
 ; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vlse.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x float> @llvm.riscv.vlse.mask.nxv16f32(
 define <vscale x 16 x float> @intrinsic_vlse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, a2, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e32,m8,tu,mu
 ; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vlse.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vlse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, a2, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e16,mf4,tu,mu
 ; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vlse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, a2, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e16,mf2,tu,mu
 ; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vlse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, a2, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e16,m1,tu,mu
 ; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vlse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, a2, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e16,m2,tu,mu
 ; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vlse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, a2, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e16,m4,tu,mu
 ; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vlse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, a2, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e16,m8,tu,mu
 ; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -988,7 +988,7 @@ declare <vscale x 1 x half> @llvm.riscv.vlse.mask.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vlse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, a2, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e16,mf4,tu,mu
 ; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vlse.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 2 x half> @llvm.riscv.vlse.mask.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vlse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, a2, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e16,mf2,tu,mu
 ; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vlse.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 4 x half> @llvm.riscv.vlse.mask.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vlse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, a2, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e16,m1,tu,mu
 ; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vlse.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 8 x half> @llvm.riscv.vlse.mask.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vlse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, a2, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e16,m2,tu,mu
 ; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vlse.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 16 x half> @llvm.riscv.vlse.mask.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vlse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, a2, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e16,m4,tu,mu
 ; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vlse.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 32 x half> @llvm.riscv.vlse.mask.nxv32f16(
 define <vscale x 32 x half> @intrinsic_vlse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, a2, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e16,m8,tu,mu
 ; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vlse.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vlse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, a2, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e8,mf8,tu,mu
 ; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vlse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, a2, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e8,mf4,tu,mu
 ; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vlse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, a2, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e8,mf2,tu,mu
 ; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vlse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, a2, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e8,m1,tu,mu
 ; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vlse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, a2, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e8,m2,tu,mu
 ; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vlse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, a2, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e8,m4,tu,mu
 ; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vlse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, i64 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, a2, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, a2, e8,m8,tu,mu
 ; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8(
     <vscale x 64 x i8> %0,

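The remaining hunks apply the same ta-to-tu flip to the masked indexed loads (vlxe). Structurally they differ from vlse only in taking an index vector in place of the scalar stride; sketched from the define signatures in the file below:

  ; tu,mu preserves tail elements of %a from the passthru %0.
  %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i32(
      <vscale x 1 x i8> %0,   ; passthru, tied to the destination register
      <vscale x 1 x i8>* %1,  ; base address
      <vscale x 1 x i32> %2,  ; index operand, one offset per element
      <vscale x 1 x i1> %3,   ; mask register v0
      i32 %4)                 ; vl, the active vector length
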
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlxe-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlxe-rv32.ll
index d9b23e17a86d..0792865f2a6e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlxe-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlxe-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i32(
 define <vscale x 1 x i8> @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i32(
 define <vscale x 2 x i8> @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i32(
 define <vscale x 4 x i8> @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i32(
 define <vscale x 8 x i8> @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i32(
 define <vscale x 16 x i8> @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i32(
 define <vscale x 1 x i16> @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -268,7 +268,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i32(
 define <vscale x 2 x i16> @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -308,7 +308,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i32(
 define <vscale x 4 x i16> @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i32(
 define <vscale x 8 x i16> @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i32(
 define <vscale x 16 x i16> @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -468,7 +468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -508,7 +508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -548,7 +548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i32(
 define <vscale x 1 x half> @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -668,7 +668,7 @@ declare <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i32(
 define <vscale x 2 x half> @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -708,7 +708,7 @@ declare <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i32(
 define <vscale x 4 x half> @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -748,7 +748,7 @@ declare <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i32(
 define <vscale x 8 x half> @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -788,7 +788,7 @@ declare <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i32(
 define <vscale x 16 x half> @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -828,7 +828,7 @@ declare <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i32(
 define <vscale x 1 x float> @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -868,7 +868,7 @@ declare <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i32(
 define <vscale x 2 x float> @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -908,7 +908,7 @@ declare <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i32(
 define <vscale x 4 x float> @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -948,7 +948,7 @@ declare <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i32(
 define <vscale x 8 x float> @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -988,7 +988,7 @@ declare <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i32(
 define <vscale x 16 x float> @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i16(
 define <vscale x 1 x i8> @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i16(
 define <vscale x 2 x i8> @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i16(
 define <vscale x 4 x i8> @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i16(
 define <vscale x 8 x i8> @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i16(
 define <vscale x 16 x i8> @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vlxe.mask.nxv32i8.nxv32i16(
 define <vscale x 32 x i8> @intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vlxe.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vlxe.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vlxe.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i16(
 define <vscale x 1 x i32> @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i16(
 define <vscale x 4 x i32> @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i16(
 define <vscale x 8 x i32> @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i16(
 define <vscale x 16 x i32> @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i16(
 define <vscale x 1 x half> @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i16(
 define <vscale x 2 x half> @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -1788,7 +1788,7 @@ declare <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i16(
 define <vscale x 4 x half> @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -1828,7 +1828,7 @@ declare <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i16(
 define <vscale x 8 x half> @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -1868,7 +1868,7 @@ declare <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i16(
 define <vscale x 16 x half> @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -1908,7 +1908,7 @@ declare <vscale x 32 x half> @llvm.riscv.vlxe.mask.nxv32f16.nxv32i16(
 define <vscale x 32 x half> @intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vlxe.mask.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -1948,7 +1948,7 @@ declare <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i16(
 define <vscale x 1 x float> @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -1988,7 +1988,7 @@ declare <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i16(
 define <vscale x 2 x float> @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -2028,7 +2028,7 @@ declare <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i16(
 define <vscale x 4 x float> @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -2068,7 +2068,7 @@ declare <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i16(
 define <vscale x 8 x float> @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -2108,7 +2108,7 @@ declare <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i16(
 define <vscale x 16 x float> @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -2148,7 +2148,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -2188,7 +2188,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -2228,7 +2228,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2268,7 +2268,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2308,7 +2308,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2348,7 +2348,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vlxe.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vlxe.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2388,7 +2388,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vlxe.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vlxe_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vlxe.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2428,7 +2428,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i8(
 define <vscale x 1 x i16> @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -2468,7 +2468,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i8(
 define <vscale x 2 x i16> @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -2508,7 +2508,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -2548,7 +2548,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i8(
 define <vscale x 8 x i16> @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -2588,7 +2588,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i8(
 define <vscale x 16 x i16> @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -2628,7 +2628,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vlxe.mask.nxv32i16.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vlxe.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -2668,7 +2668,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i8(
 define <vscale x 1 x i32> @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
@@ -2708,7 +2708,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i8(
 define <vscale x 2 x i32> @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
@@ -2748,7 +2748,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i8(
 define <vscale x 4 x i32> @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
@@ -2788,7 +2788,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i8(
 define <vscale x 8 x i32> @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
@@ -2828,7 +2828,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i8(
 define <vscale x 16 x i32> @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
@@ -2868,7 +2868,7 @@ declare <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i8(
 define <vscale x 1 x half> @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -2908,7 +2908,7 @@ declare <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i8(
 define <vscale x 2 x half> @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -2948,7 +2948,7 @@ declare <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i8(
 define <vscale x 4 x half> @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -2988,7 +2988,7 @@ declare <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i8(
 define <vscale x 8 x half> @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -3028,7 +3028,7 @@ declare <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i8(
 define <vscale x 16 x half> @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -3068,7 +3068,7 @@ declare <vscale x 32 x half> @llvm.riscv.vlxe.mask.nxv32f16.nxv32i8(
 define <vscale x 32 x half> @intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vlxe.mask.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -3108,7 +3108,7 @@ declare <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i8(
 define <vscale x 1 x float> @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
@@ -3148,7 +3148,7 @@ declare <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i8(
 define <vscale x 2 x float> @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
@@ -3188,7 +3188,7 @@ declare <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i8(
 define <vscale x 4 x float> @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
@@ -3228,7 +3228,7 @@ declare <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i8(
 define <vscale x 8 x float> @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
@@ -3268,7 +3268,7 @@ declare <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i8(
 define <vscale x 16 x float> @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vlxe-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlxe-rv64.ll
index 5ecb7d502942..e76fbb0178be 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlxe-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlxe-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i64(
 define <vscale x 1 x i8> @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i64(
 define <vscale x 2 x i8> @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i64(
 define <vscale x 4 x i8> @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i64(
 define <vscale x 8 x i8> @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i64(
 define <vscale x 1 x i16> @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
@@ -228,7 +228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i64(
 define <vscale x 2 x i16> @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
@@ -268,7 +268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i64(
 define <vscale x 4 x i16> @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
@@ -308,7 +308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i64(
 define <vscale x 8 x i16> @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i64(
 define <vscale x 1 x i32> @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -388,7 +388,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i64(
 define <vscale x 2 x i32> @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -428,7 +428,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i64(
 define <vscale x 4 x i32> @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -468,7 +468,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i64(
 define <vscale x 8 x i32> @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -508,7 +508,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vlxe.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vlxe.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -548,7 +548,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vlxe.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vlxe.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -588,7 +588,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vlxe.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vlxe.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -628,7 +628,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vlxe.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vlxe.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -668,7 +668,7 @@ declare <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i64(
 define <vscale x 1 x half> @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
@@ -708,7 +708,7 @@ declare <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i64(
 define <vscale x 2 x half> @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
@@ -748,7 +748,7 @@ declare <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i64(
 define <vscale x 4 x half> @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
@@ -788,7 +788,7 @@ declare <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i64(
 define <vscale x 8 x half> @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
@@ -828,7 +828,7 @@ declare <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i64(
 define <vscale x 1 x float> @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -868,7 +868,7 @@ declare <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i64(
 define <vscale x 2 x float> @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -908,7 +908,7 @@ declare <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i64(
 define <vscale x 4 x float> @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -948,7 +948,7 @@ declare <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i64(
 define <vscale x 8 x float> @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
@@ -988,7 +988,7 @@ declare <vscale x 1 x double> @llvm.riscv.vlxe.mask.nxv1f64.nxv1i64(
 define <vscale x 1 x double> @intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vlxe.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 2 x double> @llvm.riscv.vlxe.mask.nxv2f64.nxv2i64(
 define <vscale x 2 x double> @intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vlxe.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 4 x double> @llvm.riscv.vlxe.mask.nxv4f64.nxv4i64(
 define <vscale x 4 x double> @intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vlxe.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 8 x double> @llvm.riscv.vlxe.mask.nxv8f64.nxv8i64(
 define <vscale x 8 x double> @intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vlxe.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i32(
 define <vscale x 1 x i8> @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i32(
 define <vscale x 2 x i8> @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i32(
 define <vscale x 4 x i8> @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i32(
 define <vscale x 8 x i8> @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i32(
 define <vscale x 16 x i8> @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i32(
 define <vscale x 1 x i16> @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i32(
 define <vscale x 2 x i16> @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i32(
 define <vscale x 4 x i16> @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i32(
 define <vscale x 8 x i16> @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i32(
 define <vscale x 16 x i16> @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vlxe.mask.nxv1i64.nxv1i32(
 define <vscale x 1 x i64> @intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vlxe.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -1788,7 +1788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vlxe.mask.nxv2i64.nxv2i32(
 define <vscale x 2 x i64> @intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vlxe.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -1828,7 +1828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vlxe.mask.nxv4i64.nxv4i32(
 define <vscale x 4 x i64> @intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vlxe.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -1868,7 +1868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vlxe.mask.nxv8i64.nxv8i32(
 define <vscale x 8 x i64> @intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vlxe.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -1908,7 +1908,7 @@ declare <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i32(
 define <vscale x 1 x half> @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -1948,7 +1948,7 @@ declare <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i32(
 define <vscale x 2 x half> @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -1988,7 +1988,7 @@ declare <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i32(
 define <vscale x 4 x half> @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -2028,7 +2028,7 @@ declare <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i32(
 define <vscale x 8 x half> @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -2068,7 +2068,7 @@ declare <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i32(
 define <vscale x 16 x half> @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -2108,7 +2108,7 @@ declare <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i32(
 define <vscale x 1 x float> @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -2148,7 +2148,7 @@ declare <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i32(
 define <vscale x 2 x float> @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -2188,7 +2188,7 @@ declare <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i32(
 define <vscale x 4 x float> @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -2228,7 +2228,7 @@ declare <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i32(
 define <vscale x 8 x float> @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -2268,7 +2268,7 @@ declare <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i32(
 define <vscale x 16 x float> @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -2308,7 +2308,7 @@ declare <vscale x 1 x double> @llvm.riscv.vlxe.mask.nxv1f64.nxv1i32(
 define <vscale x 1 x double> @intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vlxe.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -2348,7 +2348,7 @@ declare <vscale x 2 x double> @llvm.riscv.vlxe.mask.nxv2f64.nxv2i32(
 define <vscale x 2 x double> @intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vlxe.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -2388,7 +2388,7 @@ declare <vscale x 4 x double> @llvm.riscv.vlxe.mask.nxv4f64.nxv4i32(
 define <vscale x 4 x double> @intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vlxe.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -2428,7 +2428,7 @@ declare <vscale x 8 x double> @llvm.riscv.vlxe.mask.nxv8f64.nxv8i32(
 define <vscale x 8 x double> @intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vlxe.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
@@ -2468,7 +2468,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i16(
 define <vscale x 1 x i8> @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -2508,7 +2508,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i16(
 define <vscale x 2 x i8> @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -2548,7 +2548,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i16(
 define <vscale x 4 x i8> @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -2588,7 +2588,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i16(
 define <vscale x 8 x i8> @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -2628,7 +2628,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i16(
 define <vscale x 16 x i8> @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -2668,7 +2668,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vlxe.mask.nxv32i8.nxv32i16(
 define <vscale x 32 x i8> @intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vlxe.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -2708,7 +2708,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2748,7 +2748,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2788,7 +2788,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2828,7 +2828,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2868,7 +2868,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2908,7 +2908,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vlxe.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vlxe.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2948,7 +2948,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i16(
 define <vscale x 1 x i32> @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -2988,7 +2988,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -3028,7 +3028,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i16(
 define <vscale x 4 x i32> @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -3068,7 +3068,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i16(
 define <vscale x 8 x i32> @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -3108,7 +3108,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i16(
 define <vscale x 16 x i32> @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -3148,7 +3148,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vlxe.mask.nxv1i64.nxv1i16(
 define <vscale x 1 x i64> @intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vlxe.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
@@ -3188,7 +3188,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vlxe.mask.nxv2i64.nxv2i16(
 define <vscale x 2 x i64> @intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vlxe.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
@@ -3228,7 +3228,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vlxe.mask.nxv4i64.nxv4i16(
 define <vscale x 4 x i64> @intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vlxe.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
@@ -3268,7 +3268,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vlxe.mask.nxv8i64.nxv8i16(
 define <vscale x 8 x i64> @intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vlxe.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
@@ -3308,7 +3308,7 @@ declare <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i16(
 define <vscale x 1 x half> @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -3348,7 +3348,7 @@ declare <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i16(
 define <vscale x 2 x half> @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -3388,7 +3388,7 @@ declare <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i16(
 define <vscale x 4 x half> @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -3428,7 +3428,7 @@ declare <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i16(
 define <vscale x 8 x half> @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -3468,7 +3468,7 @@ declare <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i16(
 define <vscale x 16 x half> @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -3508,7 +3508,7 @@ declare <vscale x 32 x half> @llvm.riscv.vlxe.mask.nxv32f16.nxv32i16(
 define <vscale x 32 x half> @intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vlxe.mask.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -3548,7 +3548,7 @@ declare <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i16(
 define <vscale x 1 x float> @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -3588,7 +3588,7 @@ declare <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i16(
 define <vscale x 2 x float> @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -3628,7 +3628,7 @@ declare <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i16(
 define <vscale x 4 x float> @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -3668,7 +3668,7 @@ declare <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i16(
 define <vscale x 8 x float> @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -3708,7 +3708,7 @@ declare <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i16(
 define <vscale x 16 x float> @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -3748,7 +3748,7 @@ declare <vscale x 1 x double> @llvm.riscv.vlxe.mask.nxv1f64.nxv1i16(
 define <vscale x 1 x double> @intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vlxe.mask.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
@@ -3788,7 +3788,7 @@ declare <vscale x 2 x double> @llvm.riscv.vlxe.mask.nxv2f64.nxv2i16(
 define <vscale x 2 x double> @intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vlxe.mask.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
@@ -3828,7 +3828,7 @@ declare <vscale x 4 x double> @llvm.riscv.vlxe.mask.nxv4f64.nxv4i16(
 define <vscale x 4 x double> @intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vlxe.mask.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
@@ -3868,7 +3868,7 @@ declare <vscale x 8 x double> @llvm.riscv.vlxe.mask.nxv8f64.nxv8i16(
 define <vscale x 8 x double> @intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vlxe.mask.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
@@ -3908,7 +3908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -3948,7 +3948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -3988,7 +3988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -4028,7 +4028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -4068,7 +4068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -4108,7 +4108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vlxe.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vlxe.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -4148,7 +4148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vlxe.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vlxe_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vlxe.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -4188,7 +4188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i8(
 define <vscale x 1 x i16> @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -4228,7 +4228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i8(
 define <vscale x 2 x i16> @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -4268,7 +4268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -4308,7 +4308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i8(
 define <vscale x 8 x i16> @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -4348,7 +4348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i8(
 define <vscale x 16 x i16> @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -4388,7 +4388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vlxe.mask.nxv32i16.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vlxe.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -4428,7 +4428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i8(
 define <vscale x 1 x i32> @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
@@ -4468,7 +4468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i8(
 define <vscale x 2 x i32> @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
@@ -4508,7 +4508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i8(
 define <vscale x 4 x i32> @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
@@ -4548,7 +4548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i8(
 define <vscale x 8 x i32> @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
@@ -4588,7 +4588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i8(
 define <vscale x 16 x i32> @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
@@ -4628,7 +4628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vlxe.mask.nxv1i64.nxv1i8(
 define <vscale x 1 x i64> @intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vlxe.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
@@ -4668,7 +4668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vlxe.mask.nxv2i64.nxv2i8(
 define <vscale x 2 x i64> @intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vlxe.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
@@ -4708,7 +4708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vlxe.mask.nxv4i64.nxv4i8(
 define <vscale x 4 x i64> @intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vlxe.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
@@ -4748,7 +4748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vlxe.mask.nxv8i64.nxv8i8(
 define <vscale x 8 x i64> @intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vlxe.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
@@ -4788,7 +4788,7 @@ declare <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i8(
 define <vscale x 1 x half> @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -4828,7 +4828,7 @@ declare <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i8(
 define <vscale x 2 x half> @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -4868,7 +4868,7 @@ declare <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i8(
 define <vscale x 4 x half> @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -4908,7 +4908,7 @@ declare <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i8(
 define <vscale x 8 x half> @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -4948,7 +4948,7 @@ declare <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i8(
 define <vscale x 16 x half> @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -4988,7 +4988,7 @@ declare <vscale x 32 x half> @llvm.riscv.vlxe.mask.nxv32f16.nxv32i8(
 define <vscale x 32 x half> @intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vlxe.mask.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -5028,7 +5028,7 @@ declare <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i8(
 define <vscale x 1 x float> @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
@@ -5068,7 +5068,7 @@ declare <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i8(
 define <vscale x 2 x float> @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
@@ -5108,7 +5108,7 @@ declare <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i8(
 define <vscale x 4 x float> @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
@@ -5148,7 +5148,7 @@ declare <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i8(
 define <vscale x 8 x float> @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
@@ -5188,7 +5188,7 @@ declare <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i8(
 define <vscale x 16 x float> @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
@@ -5228,7 +5228,7 @@ declare <vscale x 1 x double> @llvm.riscv.vlxe.mask.nxv1f64.nxv1i8(
 define <vscale x 1 x double> @intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vlxe.mask.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
@@ -5268,7 +5268,7 @@ declare <vscale x 2 x double> @llvm.riscv.vlxe.mask.nxv2f64.nxv2i8(
 define <vscale x 2 x double> @intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vlxe.mask.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
@@ -5308,7 +5308,7 @@ declare <vscale x 4 x double> @llvm.riscv.vlxe.mask.nxv4f64.nxv4i8(
 define <vscale x 4 x double> @intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vlxe.mask.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
@@ -5348,7 +5348,7 @@ declare <vscale x 8 x double> @llvm.riscv.vlxe.mask.nxv8f64.nxv8i8(
 define <vscale x 8 x double> @intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vlxe.mask.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,

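(Illustrative sketch, not part of the patch: the hypothetical function below shows what the tu policy in the vlxe.mask tests above guarantees. The intrinsic declaration is copied from the tests; the function and value names are invented. Under mu, inactive lanes already come from the tied maskedoff operand; with tu the tail lanes past vl are now also taken from it, whereas under ta they were unspecified and an implementation could either keep them or overwrite them with all ones.)

; Sketch only; every name other than the intrinsic is hypothetical.
declare <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i8(
  <vscale x 1 x half>,
  <vscale x 1 x half>*,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x half> @maskedoff_sketch(<vscale x 1 x half> %maskedoff, <vscale x 1 x half>* %base, <vscale x 1 x i8> %idx, <vscale x 1 x i1> %mask, i64 %vl) nounwind {
entry:
  ; Active lanes (i < %vl with %mask[i] set) are loaded from %base indexed by
  ; %idx; inactive lanes keep %maskedoff. With e16,mf4,tu,mu the tail lanes
  ; (i >= %vl) also keep %maskedoff rather than being left unspecified.
  %v = call <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i8(
    <vscale x 1 x half> %maskedoff,
    <vscale x 1 x half>* %base,
    <vscale x 1 x i8> %idx,
    <vscale x 1 x i1> %mask,
    i64 %vl)
  ret <vscale x 1 x half> %v
}
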
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll
index 24a2a82f6ff0..889da77c5b4f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll
@@ -9,7 +9,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8(
 define <vscale x 1 x i8>  @intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -30,7 +30,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8>  @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -51,7 +51,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8(
 define <vscale x 2 x i8>  @intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -72,7 +72,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8>  @intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -93,7 +93,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8(
 define <vscale x 4 x i8>  @intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -114,7 +114,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8>  @intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -135,7 +135,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8(
 define <vscale x 8 x i8>  @intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -156,7 +156,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8>  @intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -177,7 +177,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8(
 define <vscale x 16 x i8>  @intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -198,7 +198,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8>  @intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -219,7 +219,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8(
 define <vscale x 32 x i8>  @intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -240,7 +240,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8>  @intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -261,7 +261,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16(
 define <vscale x 1 x i16>  @intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -282,7 +282,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16>  @intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -303,7 +303,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16(
 define <vscale x 2 x i16>  @intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -324,7 +324,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16>  @intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -345,7 +345,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16(
 define <vscale x 4 x i16>  @intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -366,7 +366,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16>  @intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -387,7 +387,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16(
 define <vscale x 8 x i16>  @intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -408,7 +408,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16>  @intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -429,7 +429,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16(
 define <vscale x 16 x i16>  @intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -450,7 +450,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16>  @intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -471,7 +471,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32(
 define <vscale x 1 x i32>  @intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -492,7 +492,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32>  @intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -513,7 +513,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32(
 define <vscale x 2 x i32>  @intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -534,7 +534,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32>  @intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -555,7 +555,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32(
 define <vscale x 4 x i32>  @intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -576,7 +576,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32>  @intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -597,7 +597,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32(
 define <vscale x 8 x i32>  @intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -618,7 +618,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32>  @intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -639,7 +639,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8(
 define <vscale x 1 x i8>  @intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -660,7 +660,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -681,7 +681,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8(
 define <vscale x 2 x i8>  @intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -702,7 +702,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -723,7 +723,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8(
 define <vscale x 4 x i8>  @intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -744,7 +744,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -765,7 +765,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8(
 define <vscale x 8 x i8>  @intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -786,7 +786,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -807,7 +807,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8(
 define <vscale x 16 x i8>  @intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -849,7 +849,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8(
 define <vscale x 32 x i8>  @intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -870,7 +870,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -891,7 +891,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16(
 define <vscale x 1 x i16>  @intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -912,7 +912,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -933,7 +933,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16(
 define <vscale x 2 x i16>  @intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -954,7 +954,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -975,7 +975,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16(
 define <vscale x 4 x i16>  @intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -996,7 +996,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1017,7 +1017,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16(
 define <vscale x 8 x i16>  @intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1038,7 +1038,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1059,7 +1059,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16(
 define <vscale x 16 x i16>  @intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1080,7 +1080,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1101,7 +1101,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32(
 define <vscale x 1 x i32>  @intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1122,7 +1122,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1143,7 +1143,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32(
 define <vscale x 2 x i32>  @intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1164,7 +1164,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1185,7 +1185,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32(
 define <vscale x 4 x i32>  @intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1206,7 +1206,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1227,7 +1227,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32(
 define <vscale x 8 x i32>  @intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1248,7 +1248,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,

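(A second sketch, likewise not part of the patch: for multiply-accumulate intrinsics such as vmacc, the first source operand is tied to the destination, so with tu the result's tail lanes are exactly the corresponding lanes of that operand. The intrinsic declaration matches the rv32 tests above; the function and value names are invented.)

; Sketch only; every name other than the intrinsic is hypothetical.
declare <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i32);

define <vscale x 1 x i8> @tail_policy_sketch(<vscale x 1 x i8> %acc, <vscale x 1 x i8> %x, <vscale x 1 x i8> %y, i32 %vl) nounwind {
entry:
  ; %r[i] = %x[i] * %y[i] + %acc[i] for i < %vl; with e8,mf8,tu,mu the tail
  ; lanes of %r are taken unchanged from the tied operand %acc.
  %r = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %acc,
    <vscale x 1 x i8> %x,
    <vscale x 1 x i8> %y,
    i32 %vl)
  ret <vscale x 1 x i8> %r
}
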
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll
index 9e7d36368d34..db3104f9bfcd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll
@@ -9,7 +9,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8(
 define <vscale x 1 x i8>  @intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -30,7 +30,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8>  @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -51,7 +51,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8(
 define <vscale x 2 x i8>  @intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -72,7 +72,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8>  @intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -93,7 +93,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8(
 define <vscale x 4 x i8>  @intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -114,7 +114,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8>  @intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -135,7 +135,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8(
 define <vscale x 8 x i8>  @intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -156,7 +156,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8>  @intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -177,7 +177,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8(
 define <vscale x 16 x i8>  @intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -198,7 +198,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8>  @intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -219,7 +219,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8(
 define <vscale x 32 x i8>  @intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -240,7 +240,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8>  @intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -261,7 +261,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16(
 define <vscale x 1 x i16>  @intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -282,7 +282,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16>  @intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -303,7 +303,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16(
 define <vscale x 2 x i16>  @intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -324,7 +324,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16>  @intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -345,7 +345,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16(
 define <vscale x 4 x i16>  @intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -366,7 +366,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16>  @intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -387,7 +387,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16(
 define <vscale x 8 x i16>  @intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -408,7 +408,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16>  @intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -429,7 +429,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16(
 define <vscale x 16 x i16>  @intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -450,7 +450,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16>  @intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -471,7 +471,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32(
 define <vscale x 1 x i32>  @intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -492,7 +492,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32>  @intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -513,7 +513,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32(
 define <vscale x 2 x i32>  @intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -534,7 +534,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32>  @intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -555,7 +555,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32(
 define <vscale x 4 x i32>  @intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -576,7 +576,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32>  @intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -597,7 +597,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32(
 define <vscale x 8 x i32>  @intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -618,7 +618,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32>  @intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -639,7 +639,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64(
 define <vscale x 1 x i64>  @intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -660,7 +660,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64>  @intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -681,7 +681,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64(
 define <vscale x 2 x i64>  @intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -702,7 +702,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64>  @intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -723,7 +723,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64(
 define <vscale x 4 x i64>  @intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -744,7 +744,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64>  @intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -765,7 +765,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8(
 define <vscale x 1 x i8>  @intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -786,7 +786,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -807,7 +807,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8(
 define <vscale x 2 x i8>  @intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -849,7 +849,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8(
 define <vscale x 4 x i8>  @intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -870,7 +870,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -891,7 +891,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8(
 define <vscale x 8 x i8>  @intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -912,7 +912,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -933,7 +933,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8(
 define <vscale x 16 x i8>  @intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -954,7 +954,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -975,7 +975,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8(
 define <vscale x 32 x i8>  @intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -996,7 +996,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1017,7 +1017,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16(
 define <vscale x 1 x i16>  @intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1038,7 +1038,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1059,7 +1059,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16(
 define <vscale x 2 x i16>  @intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1080,7 +1080,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1101,7 +1101,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16(
 define <vscale x 4 x i16>  @intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1122,7 +1122,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1143,7 +1143,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16(
 define <vscale x 8 x i16>  @intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1164,7 +1164,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1185,7 +1185,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16(
 define <vscale x 16 x i16>  @intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1206,7 +1206,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1227,7 +1227,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32(
 define <vscale x 1 x i32>  @intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1248,7 +1248,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1269,7 +1269,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32(
 define <vscale x 2 x i32>  @intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1290,7 +1290,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1311,7 +1311,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32(
 define <vscale x 4 x i32>  @intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1332,7 +1332,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1353,7 +1353,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32(
 define <vscale x 8 x i32>  @intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1374,7 +1374,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1395,7 +1395,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64(
 define <vscale x 1 x i64>  @intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1416,7 +1416,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1437,7 +1437,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64(
 define <vscale x 2 x i64>  @intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1458,7 +1458,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1479,7 +1479,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64(
 define <vscale x 4 x i64>  @intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1500,7 +1500,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmacc.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll
index 92744c6e7df4..8307cd512498 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll
@@ -9,7 +9,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8(
 define <vscale x 1 x i8>  @intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -30,7 +30,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8>  @intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -51,7 +51,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8(
 define <vscale x 2 x i8>  @intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -72,7 +72,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8>  @intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -93,7 +93,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8(
 define <vscale x 4 x i8>  @intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -114,7 +114,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8>  @intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -135,7 +135,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8(
 define <vscale x 8 x i8>  @intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -156,7 +156,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8>  @intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -177,7 +177,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8(
 define <vscale x 16 x i8>  @intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -198,7 +198,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8>  @intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -219,7 +219,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8(
 define <vscale x 32 x i8>  @intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -240,7 +240,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8>  @intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -261,7 +261,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16(
 define <vscale x 1 x i16>  @intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -282,7 +282,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16>  @intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -303,7 +303,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16(
 define <vscale x 2 x i16>  @intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -324,7 +324,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16>  @intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -345,7 +345,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16(
 define <vscale x 4 x i16>  @intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -366,7 +366,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16>  @intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -387,7 +387,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16(
 define <vscale x 8 x i16>  @intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -408,7 +408,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16>  @intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -429,7 +429,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16(
 define <vscale x 16 x i16>  @intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -450,7 +450,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16>  @intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -471,7 +471,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32(
 define <vscale x 1 x i32>  @intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -492,7 +492,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32>  @intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -513,7 +513,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32(
 define <vscale x 2 x i32>  @intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -534,7 +534,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32>  @intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -555,7 +555,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32(
 define <vscale x 4 x i32>  @intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -576,7 +576,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32>  @intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -597,7 +597,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32(
 define <vscale x 8 x i32>  @intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -618,7 +618,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32>  @intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -639,7 +639,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8(
 define <vscale x 1 x i8>  @intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -660,7 +660,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -681,7 +681,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8(
 define <vscale x 2 x i8>  @intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -702,7 +702,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -723,7 +723,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8(
 define <vscale x 4 x i8>  @intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -744,7 +744,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -765,7 +765,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8(
 define <vscale x 8 x i8>  @intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -786,7 +786,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -807,7 +807,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8(
 define <vscale x 16 x i8>  @intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -849,7 +849,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8(
 define <vscale x 32 x i8>  @intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -870,7 +870,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -891,7 +891,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16(
 define <vscale x 1 x i16>  @intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -912,7 +912,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -933,7 +933,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16(
 define <vscale x 2 x i16>  @intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -954,7 +954,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -975,7 +975,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16(
 define <vscale x 4 x i16>  @intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -996,7 +996,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1017,7 +1017,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16(
 define <vscale x 8 x i16>  @intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1038,7 +1038,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1059,7 +1059,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16(
 define <vscale x 16 x i16>  @intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1080,7 +1080,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1101,7 +1101,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32(
 define <vscale x 1 x i32>  @intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1122,7 +1122,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1143,7 +1143,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32(
 define <vscale x 2 x i32>  @intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1164,7 +1164,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1185,7 +1185,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32(
 define <vscale x 4 x i32>  @intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1206,7 +1206,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1227,7 +1227,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32(
 define <vscale x 8 x i32>  @intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1248,7 +1248,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll
index a6d229dcc706..f4035cd73c6d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll
@@ -9,7 +9,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8(
 define <vscale x 1 x i8>  @intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -30,7 +30,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8>  @intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -51,7 +51,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8(
 define <vscale x 2 x i8>  @intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -72,7 +72,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8>  @intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -93,7 +93,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8(
 define <vscale x 4 x i8>  @intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -114,7 +114,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8>  @intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -135,7 +135,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8(
 define <vscale x 8 x i8>  @intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -156,7 +156,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8>  @intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -177,7 +177,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8(
 define <vscale x 16 x i8>  @intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -198,7 +198,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8>  @intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -219,7 +219,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8(
 define <vscale x 32 x i8>  @intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -240,7 +240,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8>  @intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -261,7 +261,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16(
 define <vscale x 1 x i16>  @intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -282,7 +282,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16>  @intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -303,7 +303,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16(
 define <vscale x 2 x i16>  @intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -324,7 +324,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16>  @intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -345,7 +345,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16(
 define <vscale x 4 x i16>  @intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -366,7 +366,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16>  @intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -387,7 +387,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16(
 define <vscale x 8 x i16>  @intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -408,7 +408,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16>  @intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -429,7 +429,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16(
 define <vscale x 16 x i16>  @intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -450,7 +450,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16>  @intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -471,7 +471,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32(
 define <vscale x 1 x i32>  @intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -492,7 +492,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32>  @intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -513,7 +513,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32(
 define <vscale x 2 x i32>  @intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -534,7 +534,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32>  @intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -555,7 +555,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32(
 define <vscale x 4 x i32>  @intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -576,7 +576,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32>  @intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -597,7 +597,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32(
 define <vscale x 8 x i32>  @intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -618,7 +618,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32>  @intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -639,7 +639,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64(
 define <vscale x 1 x i64>  @intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -660,7 +660,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64>  @intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -681,7 +681,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64(
 define <vscale x 2 x i64>  @intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -702,7 +702,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64>  @intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -723,7 +723,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64(
 define <vscale x 4 x i64>  @intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -744,7 +744,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64>  @intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -765,7 +765,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8(
 define <vscale x 1 x i8>  @intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -786,7 +786,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -807,7 +807,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8(
 define <vscale x 2 x i8>  @intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -849,7 +849,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8(
 define <vscale x 4 x i8>  @intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -870,7 +870,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -891,7 +891,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8(
 define <vscale x 8 x i8>  @intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -912,7 +912,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -933,7 +933,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8(
 define <vscale x 16 x i8>  @intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -954,7 +954,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -975,7 +975,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8(
 define <vscale x 32 x i8>  @intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -996,7 +996,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1017,7 +1017,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16(
 define <vscale x 1 x i16>  @intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1038,7 +1038,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1059,7 +1059,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16(
 define <vscale x 2 x i16>  @intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1080,7 +1080,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1101,7 +1101,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16(
 define <vscale x 4 x i16>  @intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1122,7 +1122,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1143,7 +1143,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16(
 define <vscale x 8 x i16>  @intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1164,7 +1164,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1185,7 +1185,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16(
 define <vscale x 16 x i16>  @intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1206,7 +1206,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1227,7 +1227,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32(
 define <vscale x 1 x i32>  @intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1248,7 +1248,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1269,7 +1269,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32(
 define <vscale x 2 x i32>  @intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1290,7 +1290,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1311,7 +1311,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32(
 define <vscale x 4 x i32>  @intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1332,7 +1332,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1353,7 +1353,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32(
 define <vscale x 8 x i32>  @intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1374,7 +1374,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1395,7 +1395,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64(
 define <vscale x 1 x i64>  @intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1416,7 +1416,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1437,7 +1437,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64(
 define <vscale x 2 x i64>  @intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1458,7 +1458,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1479,7 +1479,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64(
 define <vscale x 4 x i64>  @intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1500,7 +1500,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmadd.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll
index a6609b99fa1a..3fdd78afcf84 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll
index 28b7bb266a16..91595da73fbe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vmax.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll
index 2cdad607508a..959b85e176fc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll
index b835cd1c81aa..1d1289c843ab 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vmaxu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

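Every vmaxu hunk above pins the same lowering change: the merge operand
(%0) is tied to the destination register, so the vsetvli emitted for the
masked pseudo now requests tail undisturbed. A minimal standalone sketch
of that shape (the function name @vmaxu_tu and the nxv4i32 type are
chosen for illustration, not taken from the tests):

  declare <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32(
    <vscale x 4 x i32>,  ; merge operand, tied to the destination
    <vscale x 4 x i32>,  ; op1
    <vscale x 4 x i32>,  ; op2
    <vscale x 4 x i1>,   ; mask (v0)
    i64)                 ; vl

  define <vscale x 4 x i32> @vmaxu_tu(<vscale x 4 x i32> %merge,
                                      <vscale x 4 x i32> %a,
                                      <vscale x 4 x i32> %b,
                                      <vscale x 4 x i1> %mask,
                                      i64 %vl) nounwind {
  entry:
    ; Expected lowering, matching the CHECK lines above:
    ;   vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
    ;   vmaxu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
    ; Under tu, elements at indices >= %vl keep their values from
    ; %merge; under the old ta policy the hardware was free to
    ; overwrite them.
    %r = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32(
      <vscale x 4 x i32> %merge,
      <vscale x 4 x i32> %a,
      <vscale x 4 x i32> %b,
      <vscale x 4 x i1> %mask,
      i64 %vl)
    ret <vscale x 4 x i32> %r
  }
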
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll
index 256263bae0bb..7ceaca42f6be 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16(
 define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
     <vscale x 1 x half> %1,
@@ -72,7 +72,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f16(
 define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f16(
     <vscale x 2 x half> %1,
@@ -116,7 +116,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f16(
 define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f16(
     <vscale x 4 x half> %1,
@@ -160,7 +160,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f16(
 define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f16(
     <vscale x 8 x half> %1,
@@ -204,7 +204,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f16(
 define <vscale x 16 x i1> @intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f16(
     <vscale x 16 x half> %1,
@@ -248,7 +248,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32(
 define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32(
     <vscale x 1 x float> %1,
@@ -292,7 +292,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32(
 define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32(
     <vscale x 2 x float> %1,
@@ -336,7 +336,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32(
 define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32(
     <vscale x 4 x float> %1,
@@ -380,7 +380,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32(
 define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32(
     <vscale x 8 x float> %1,
@@ -424,7 +424,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16.f16(
 define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -464,7 +464,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f16.f16(
 define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -504,7 +504,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f16.f16(
 define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -544,7 +544,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f16.f16(
 define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -584,7 +584,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f16.f16(
 define <vscale x 16 x i1> @intrinsic_vmfeq_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -624,7 +624,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32(
 define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -664,7 +664,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32.f32(
 define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -704,7 +704,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32.f32(
 define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -744,7 +744,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32.f32(
 define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll
index 577fa6d2ce19..2253ea07c1f0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16(
 define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
     <vscale x 1 x half> %1,
@@ -72,7 +72,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f16(
 define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f16(
     <vscale x 2 x half> %1,
@@ -116,7 +116,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f16(
 define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f16(
     <vscale x 4 x half> %1,
@@ -160,7 +160,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f16(
 define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f16(
     <vscale x 8 x half> %1,
@@ -204,7 +204,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f16(
 define <vscale x 16 x i1> @intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f16(
     <vscale x 16 x half> %1,
@@ -248,7 +248,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32(
 define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32(
     <vscale x 1 x float> %1,
@@ -292,7 +292,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32(
 define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32(
     <vscale x 2 x float> %1,
@@ -336,7 +336,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32(
 define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32(
     <vscale x 4 x float> %1,
@@ -380,7 +380,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32(
 define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32(
     <vscale x 8 x float> %1,
@@ -424,7 +424,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64(
 define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64(
     <vscale x 1 x double> %1,
@@ -468,7 +468,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64(
 define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64(
     <vscale x 2 x double> %1,
@@ -512,7 +512,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64(
 define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64(
     <vscale x 4 x double> %1,
@@ -556,7 +556,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16.f16(
 define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -596,7 +596,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f16.f16(
 define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -636,7 +636,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f16.f16(
 define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -676,7 +676,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f16.f16(
 define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -716,7 +716,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f16.f16(
 define <vscale x 16 x i1> @intrinsic_vmfeq_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -756,7 +756,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32(
 define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -796,7 +796,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32.f32(
 define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -836,7 +836,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32.f32(
 define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -876,7 +876,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32.f32(
 define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,
@@ -916,7 +916,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64.f64(
 define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64.f64(
     <vscale x 1 x i1> %0,
@@ -956,7 +956,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64.f64(
 define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64.f64(
     <vscale x 2 x i1> %0,
@@ -996,7 +996,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64.f64(
 define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64.f64(
     <vscale x 4 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll
index 42189c52c228..0cac5b143275 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f16.f16(
 define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f16.f16(
 define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f16.f16(
 define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f16.f16(
 define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f16.f16(
 define <vscale x 16 x i1> @intrinsic_vmfge_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -228,7 +228,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32(
 define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -268,7 +268,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32.f32(
 define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -308,7 +308,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32.f32(
 define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -348,7 +348,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32.f32(
 define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll
index 8d6af219ce13..f62288ec544a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f16.f16(
 define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f16.f16(
 define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f16.f16(
 define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f16.f16(
 define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f16.f16(
 define <vscale x 16 x i1> @intrinsic_vmfge_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -228,7 +228,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32(
 define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -268,7 +268,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32.f32(
 define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -308,7 +308,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32.f32(
 define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -348,7 +348,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32.f32(
 define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,
@@ -388,7 +388,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64.f64(
 define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64.f64(
     <vscale x 1 x i1> %0,
@@ -428,7 +428,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64.f64(
 define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64.f64(
     <vscale x 2 x i1> %0,
@@ -468,7 +468,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64.f64(
 define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64.f64(
     <vscale x 4 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll
index 724d0e8afc0f..c688fcda13a2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f16.f16(
 define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f16.f16(
 define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f16.f16(
 define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f16.f16(
 define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f16.f16(
 define <vscale x 16 x i1> @intrinsic_vmfgt_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -228,7 +228,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32(
 define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -268,7 +268,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f32.f32(
 define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -308,7 +308,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f32.f32(
 define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -348,7 +348,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f32.f32(
 define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll
index 6145ac78ae07..4cca708429bb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f16.f16(
 define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f16.f16(
 define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f16.f16(
 define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f16.f16(
 define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f16.f16(
 define <vscale x 16 x i1> @intrinsic_vmfgt_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -228,7 +228,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32(
 define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -268,7 +268,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f32.f32(
 define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -308,7 +308,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f32.f32(
 define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -348,7 +348,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f32.f32(
 define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,
@@ -388,7 +388,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f64.f64(
 define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f64.f64(
     <vscale x 1 x i1> %0,
@@ -428,7 +428,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64.f64(
 define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64.f64(
     <vscale x 2 x i1> %0,
@@ -468,7 +468,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64.f64(
 define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64.f64(
     <vscale x 4 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll
index 870c1fdd7bc5..e59ae99bf909 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16(
 define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16(
     <vscale x 1 x half> %1,
@@ -72,7 +72,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16(
 define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16(
     <vscale x 2 x half> %1,
@@ -116,7 +116,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16(
 define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16(
     <vscale x 4 x half> %1,
@@ -160,7 +160,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16(
 define <vscale x 8 x i1> @intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16(
     <vscale x 8 x half> %1,
@@ -204,7 +204,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16(
 define <vscale x 16 x i1> @intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16(
     <vscale x 16 x half> %1,
@@ -248,7 +248,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32(
 define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32(
     <vscale x 1 x float> %1,
@@ -292,7 +292,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32(
 define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32(
     <vscale x 2 x float> %1,
@@ -336,7 +336,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32(
 define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32(
     <vscale x 4 x float> %1,
@@ -380,7 +380,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32(
 define <vscale x 8 x i1> @intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32(
     <vscale x 8 x float> %1,
@@ -424,7 +424,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16.f16(
 define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -464,7 +464,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16.f16(
 define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -504,7 +504,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16.f16(
 define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -544,7 +544,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16.f16(
 define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -584,7 +584,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16.f16(
 define <vscale x 16 x i1> @intrinsic_vmfle_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -624,7 +624,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32(
 define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -664,7 +664,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.f32(
 define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -704,7 +704,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.f32(
 define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -744,7 +744,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.f32(
 define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll
index 5d5f3cc4de9a..5160de0a98b7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16(
 define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16(
     <vscale x 1 x half> %1,
@@ -72,7 +72,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16(
 define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16(
     <vscale x 2 x half> %1,
@@ -116,7 +116,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16(
 define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16(
     <vscale x 4 x half> %1,
@@ -160,7 +160,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16(
 define <vscale x 8 x i1> @intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16(
     <vscale x 8 x half> %1,
@@ -204,7 +204,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16(
 define <vscale x 16 x i1> @intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16(
     <vscale x 16 x half> %1,
@@ -248,7 +248,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32(
 define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32(
     <vscale x 1 x float> %1,
@@ -292,7 +292,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32(
 define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32(
     <vscale x 2 x float> %1,
@@ -336,7 +336,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32(
 define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32(
     <vscale x 4 x float> %1,
@@ -380,7 +380,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32(
 define <vscale x 8 x i1> @intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32(
     <vscale x 8 x float> %1,
@@ -424,7 +424,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64(
 define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64(
     <vscale x 1 x double> %1,
@@ -468,7 +468,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64(
 define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64(
     <vscale x 2 x double> %1,
@@ -512,7 +512,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64(
 define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64(
     <vscale x 4 x double> %1,
@@ -556,7 +556,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16.f16(
 define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -596,7 +596,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16.f16(
 define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -636,7 +636,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16.f16(
 define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -676,7 +676,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16.f16(
 define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -716,7 +716,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16.f16(
 define <vscale x 16 x i1> @intrinsic_vmfle_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -756,7 +756,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32(
 define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -796,7 +796,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.f32(
 define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -836,7 +836,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.f32(
 define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -876,7 +876,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.f32(
 define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,
@@ -916,7 +916,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.f64(
 define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.f64(
     <vscale x 1 x i1> %0,
@@ -956,7 +956,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.f64(
 define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.f64(
     <vscale x 2 x i1> %0,
@@ -996,7 +996,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.f64(
 define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.f64(
     <vscale x 4 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll
index 95d1046eb992..134a313a69ad 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16(
 define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16(
     <vscale x 1 x half> %1,
@@ -72,7 +72,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16(
 define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f16(
     <vscale x 2 x half> %1,
@@ -116,7 +116,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16(
 define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f16(
     <vscale x 4 x half> %1,
@@ -160,7 +160,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16(
 define <vscale x 8 x i1> @intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16(
     <vscale x 8 x half> %1,
@@ -204,7 +204,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f16(
 define <vscale x 16 x i1> @intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16(
     <vscale x 16 x half> %1,
@@ -248,7 +248,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32(
 define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32(
     <vscale x 1 x float> %1,
@@ -292,7 +292,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32(
 define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32(
     <vscale x 2 x float> %1,
@@ -336,7 +336,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32(
 define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32(
     <vscale x 4 x float> %1,
@@ -380,7 +380,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32(
 define <vscale x 8 x i1> @intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32(
     <vscale x 8 x float> %1,
@@ -424,7 +424,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16.f16(
 define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -464,7 +464,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16.f16(
 define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -504,7 +504,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16.f16(
 define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -544,7 +544,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16.f16(
 define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -584,7 +584,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f16.f16(
 define <vscale x 16 x i1> @intrinsic_vmflt_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -624,7 +624,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32(
 define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -664,7 +664,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32.f32(
 define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -704,7 +704,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32.f32(
 define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -744,7 +744,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32.f32(
 define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,

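To make the pattern in these hunks concrete, here is a minimal sketch of such a test, written in the same style as the files above. The function and value names are illustrative only and the signature is modeled on the declarations in vmflt-rv32.ll; this sketch is not part of the patch. Because the masked compare's destination is tied to its passthru operand, codegen is now expected to request the tail-undisturbed (tu) policy in the vsetvli rather than tail-agnostic (ta):

declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16(
  <vscale x 1 x i1>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  i32)

; Hypothetical test, not part of this patch. %tied is the passthru the
; destination is tied to, so tail elements must be preserved.
define <vscale x 1 x i1> @sketch_vmflt_mask(<vscale x 1 x i1> %tied, <vscale x 1 x half> %a, <vscale x 1 x half> %b, <vscale x 1 x i1> %mask, i32 %vl) nounwind {
entry:
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
  %r = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16(
    <vscale x 1 x i1> %tied,
    <vscale x 1 x half> %a,
    <vscale x 1 x half> %b,
    <vscale x 1 x i1> %mask,
    i32 %vl)
  ret <vscale x 1 x i1> %r
}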
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll
index 80eebbe0c8dc..825aab40948c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16(
 define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16(
     <vscale x 1 x half> %1,
@@ -72,7 +72,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16(
 define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f16(
     <vscale x 2 x half> %1,
@@ -116,7 +116,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16(
 define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f16(
     <vscale x 4 x half> %1,
@@ -160,7 +160,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16(
 define <vscale x 8 x i1> @intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16(
     <vscale x 8 x half> %1,
@@ -204,7 +204,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f16(
 define <vscale x 16 x i1> @intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16(
     <vscale x 16 x half> %1,
@@ -248,7 +248,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32(
 define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32(
     <vscale x 1 x float> %1,
@@ -292,7 +292,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32(
 define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32(
     <vscale x 2 x float> %1,
@@ -336,7 +336,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32(
 define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32(
     <vscale x 4 x float> %1,
@@ -380,7 +380,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32(
 define <vscale x 8 x i1> @intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32(
     <vscale x 8 x float> %1,
@@ -424,7 +424,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64(
 define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64(
     <vscale x 1 x double> %1,
@@ -468,7 +468,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64(
 define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64(
     <vscale x 2 x double> %1,
@@ -512,7 +512,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64(
 define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64(
     <vscale x 4 x double> %1,
@@ -556,7 +556,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16.f16(
 define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -596,7 +596,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16.f16(
 define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -636,7 +636,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16.f16(
 define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -676,7 +676,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16.f16(
 define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -716,7 +716,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f16.f16(
 define <vscale x 16 x i1> @intrinsic_vmflt_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -756,7 +756,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32(
 define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -796,7 +796,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32.f32(
 define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -836,7 +836,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32.f32(
 define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -876,7 +876,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32.f32(
 define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,
@@ -916,7 +916,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64.f64(
 define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64.f64(
     <vscale x 1 x i1> %0,
@@ -956,7 +956,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64.f64(
 define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64.f64(
     <vscale x 2 x i1> %0,
@@ -996,7 +996,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64.f64(
 define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64.f64(
     <vscale x 4 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll
index e98d68159bf4..71a892991e82 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16(
 define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16(
     <vscale x 1 x half> %1,
@@ -72,7 +72,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16(
 define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16(
     <vscale x 2 x half> %1,
@@ -116,7 +116,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16(
 define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16(
     <vscale x 4 x half> %1,
@@ -160,7 +160,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16(
 define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16(
     <vscale x 8 x half> %1,
@@ -204,7 +204,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16(
 define <vscale x 16 x i1> @intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16(
     <vscale x 16 x half> %1,
@@ -248,7 +248,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32(
 define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32(
     <vscale x 1 x float> %1,
@@ -292,7 +292,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32(
 define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32(
     <vscale x 2 x float> %1,
@@ -336,7 +336,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32(
 define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32(
     <vscale x 4 x float> %1,
@@ -380,7 +380,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32(
 define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32(
     <vscale x 8 x float> %1,
@@ -424,7 +424,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16.f16(
 define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -464,7 +464,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16.f16(
 define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -504,7 +504,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16.f16(
 define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -544,7 +544,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16.f16(
 define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -584,7 +584,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16.f16(
 define <vscale x 16 x i1> @intrinsic_vmfne_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -624,7 +624,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32(
 define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -664,7 +664,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.f32(
 define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -704,7 +704,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.f32(
 define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -744,7 +744,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.f32(
 define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll
index 59e85968efbf..66966462d72d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16(
 define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16(
     <vscale x 1 x half> %1,
@@ -72,7 +72,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16(
 define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16(
     <vscale x 2 x half> %1,
@@ -116,7 +116,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16(
 define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16(
     <vscale x 4 x half> %1,
@@ -160,7 +160,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16(
 define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16(
     <vscale x 8 x half> %1,
@@ -204,7 +204,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16(
 define <vscale x 16 x i1> @intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16(
     <vscale x 16 x half> %1,
@@ -248,7 +248,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32(
 define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32(
     <vscale x 1 x float> %1,
@@ -292,7 +292,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32(
 define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32(
     <vscale x 2 x float> %1,
@@ -336,7 +336,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32(
 define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32(
     <vscale x 4 x float> %1,
@@ -380,7 +380,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32(
 define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32(
     <vscale x 8 x float> %1,
@@ -424,7 +424,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64(
 define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64(
     <vscale x 1 x double> %1,
@@ -468,7 +468,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64(
 define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64(
     <vscale x 2 x double> %1,
@@ -512,7 +512,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64(
 define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64(
     <vscale x 4 x double> %1,
@@ -556,7 +556,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16.f16(
 define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -596,7 +596,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16.f16(
 define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -636,7 +636,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16.f16(
 define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -676,7 +676,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16.f16(
 define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -716,7 +716,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16.f16(
 define <vscale x 16 x i1> @intrinsic_vmfne_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -756,7 +756,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32(
 define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -796,7 +796,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.f32(
 define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -836,7 +836,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.f32(
 define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -876,7 +876,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.f32(
 define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,
@@ -916,7 +916,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.f64(
 define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.f64(
     <vscale x 1 x i1> %0,
@@ -956,7 +956,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.f64(
 define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.f64(
     <vscale x 2 x i1> %0,
@@ -996,7 +996,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.f64(
 define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f64_f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.f64(
     <vscale x 4 x i1> %0,

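The same reasoning covers ordinary masked arithmetic, not only the mask-producing compares: a masked vmin merges inactive and tail elements into its tied destination, so its vsetvli likewise switches from ta to tu. A hypothetical vx-form sketch follows, with illustrative names and a signature modeled on the vmin-rv32.ll declarations below; it is a sketch, not part of the patch:

declare <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  i32)

; Hypothetical test, not part of this patch. %merge is the tied operand
; whose tail elements the user can observe, hence the tu policy.
define <vscale x 1 x i8> @sketch_vmin_mask_vx(<vscale x 1 x i8> %merge, <vscale x 1 x i8> %a, i8 %s, <vscale x 1 x i1> %mask, i32 %vl) nounwind {
entry:
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
; CHECK: vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
  %r = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.i8(
    <vscale x 1 x i8> %merge,
    <vscale x 1 x i8> %a,
    i8 %s,
    <vscale x 1 x i1> %mask,
    i32 %vl)
  ret <vscale x 1 x i8> %r
}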
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll
index 6f3fe35a36b9..3344d48822eb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

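The same one-line change repeats for every element type and LMUL combination in this file: because the masked vmin pseudo ties its destination to the passthru operand %0, the vsetvli it emits now requests tail undisturbed (tu) rather than tail agnostic (ta). A minimal sketch of the guarantee this buys, using hypothetical lane values that are not part of the patch (vl = 2 on a 4-lane group):

  ; passthru (tied dest)  v8  = { 7, 7, 7, 7 }
  ; sources               v9  = { 1, 9, ?, ? }   v10 = { 3, 4, ?, ? }
  ; mask                  v0  = { 1, 1, ?, ? }
  vsetvli t0, a0, e32,m1,tu,mu     ; vl = 2, tail undisturbed
  vmin.vv v8, v9, v10, v0.t
  ; result                v8  = { 1, 4, 7, 7 }   ; lanes 2..3 keep the
  ;                                              ; passthru values; under
  ;                                              ; ta,mu they could hold
  ;                                              ; any value
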
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll
index f1be40c9fe19..10149166829a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vmin.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

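Viewed at the IR level, the contract the tu policy provides can be stated directly against the declarations above; a sketch with illustrative names (%passthru, %x, %y, %m, %vl are not taken from the tests):

  %r = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.nxv2i32(
      <vscale x 2 x i32> %passthru,
      <vscale x 2 x i32> %x,
      <vscale x 2 x i32> %y,
      <vscale x 2 x i1> %m,
      i64 %vl)
  ; With tu,mu in the vsetvli, lanes of %r at or beyond %vl match the
  ; corresponding lanes of %passthru, so callers may rely on the tail.
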
diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll
index 4f5f37d2538c..5e4b9470df09 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

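All of the masked-intrinsic updates in these files follow one pattern: the intrinsic ties its destination to the first operand (the merge value), so the user can observe the tail of that register and the vsetvli must now request tail undisturbed. A minimal sketch of the shape these tests share, with illustrative operand names rather than ones taken from any single test:

  ; The merge operand is tied to the destination, so its tail elements are
  ; visible in %a. Lowering must therefore use tu (tail undisturbed), not ta.
  %a = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %merge,   ; tied destination / merge value
    <vscale x 1 x i8> %op1,
    <vscale x 1 x i8> %op2,
    <vscale x 1 x i1> %mask,
    i32 %vl)
  ; The CHECK lines now expect:
  ;   vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
  ;   vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
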
diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll
index 51edd12a9e16..79e9204d346f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vminu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vminu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll
index 8be6aab802d8..100ecbdb09f3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll
@@ -29,7 +29,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsbf.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -71,7 +71,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsbf.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -113,7 +113,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsbf.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -155,7 +155,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsbf.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -197,7 +197,7 @@ define <vscale x 16 x i1> @intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1(<vscale x 16 x
 ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsbf.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -239,7 +239,7 @@ define <vscale x 32 x i1> @intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1(<vscale x 32 x
 ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsbf.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -281,7 +281,7 @@ define <vscale x 64 x i1> @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1(<vscale x 64 x
 ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsbf.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll
index e956d703518d..b7cf0533406a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll
@@ -29,7 +29,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsbf.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -71,7 +71,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsbf.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -113,7 +113,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsbf.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -155,7 +155,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsbf.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -197,7 +197,7 @@ define <vscale x 16 x i1> @intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1(<vscale x 16 x
 ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsbf.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -239,7 +239,7 @@ define <vscale x 32 x i1> @intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1(<vscale x 32 x
 ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsbf.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -281,7 +281,7 @@ define <vscale x 64 x i1> @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1(<vscale x 64 x
 ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsbf.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

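The vmsbf tests above also show the register juggling required when both the merge value and the mask compete for v0. Annotated as a sketch of what the CHECK-NEXT sequence verifies (register numbers are whatever the tests happen to pick):

  vmv1r.v v25, v0               # the merge value arrives in v0; save it
  vsetvli a0, a0, e8,mf8,tu,mu  # tu keeps the undisturbed bits of v25
  vmv1r.v v0, v17               # the mask must live in v0 to be used as v0.t
  vmsbf.m v25, v16, v0.t        # masked set-before-first written into v25
  vmv1r.v v0, v25               # the result mask is returned in v0
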
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll
index bc96a2394d5e..4fb4e0d97557 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -72,7 +72,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -116,7 +116,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -160,7 +160,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -204,7 +204,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -248,7 +248,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -292,7 +292,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -336,7 +336,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -380,7 +380,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -424,7 +424,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -512,7 +512,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -556,7 +556,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -600,7 +600,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -644,7 +644,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -688,7 +688,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
 define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -728,7 +728,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
 define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -768,7 +768,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
 define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -808,7 +808,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
 define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -848,7 +848,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
 define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -888,7 +888,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
 define <vscale x 32 x i1> @intrinsic_vmseq_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -928,7 +928,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
 define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -968,7 +968,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
 define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1008,7 +1008,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
 define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1048,7 +1048,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
 define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1088,7 +1088,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
 define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1128,7 +1128,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
 define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1168,7 +1168,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
 define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1208,7 +1208,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
 define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1248,7 +1248,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
 define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1276,7 +1276,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1304,7 +1304,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1332,7 +1332,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1360,7 +1360,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1388,7 +1388,7 @@ entry:
 define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1416,7 +1416,7 @@ entry:
 define <vscale x 32 x i1> @intrinsic_vmseq_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1444,7 +1444,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1472,7 +1472,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1500,7 +1500,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1528,7 +1528,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1556,7 +1556,7 @@ entry:
 define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1584,7 +1584,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1612,7 +1612,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1640,7 +1640,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1668,7 +1668,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll
index 7e7fc156096a..3d7bc0d166f3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -72,7 +72,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -116,7 +116,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -160,7 +160,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -204,7 +204,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -248,7 +248,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -292,7 +292,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -336,7 +336,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -380,7 +380,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -424,7 +424,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -512,7 +512,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -556,7 +556,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -600,7 +600,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -644,7 +644,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -688,7 +688,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(
 define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -732,7 +732,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64(
 define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -776,7 +776,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64(
 define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -820,7 +820,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
 define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -860,7 +860,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
 define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -900,7 +900,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
 define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -940,7 +940,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
 define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -980,7 +980,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
 define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1020,7 +1020,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
 define <vscale x 32 x i1> @intrinsic_vmseq_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1060,7 +1060,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
 define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1100,7 +1100,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
 define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1140,7 +1140,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
 define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1180,7 +1180,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
 define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1220,7 +1220,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
 define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1260,7 +1260,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
 define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1300,7 +1300,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
 define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1340,7 +1340,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
 define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1380,7 +1380,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
 define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1420,7 +1420,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
 define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1460,7 +1460,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64(
 define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1500,7 +1500,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64(
 define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1528,7 +1528,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1556,7 +1556,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1584,7 +1584,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1612,7 +1612,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1640,7 +1640,7 @@ entry:
 define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1668,7 +1668,7 @@ entry:
 define <vscale x 32 x i1> @intrinsic_vmseq_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1696,7 +1696,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1724,7 +1724,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1752,7 +1752,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1780,7 +1780,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1808,7 +1808,7 @@ entry:
 define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1836,7 +1836,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1864,7 +1864,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1892,7 +1892,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1920,7 +1920,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1948,7 +1948,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1976,7 +1976,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2004,7 +2004,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll
index 13d77814f972..283c8a0f5b60 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
 define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
 define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
 define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
 define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
 define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
 define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
 define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
 define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
 define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
 define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
 define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
 define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
 define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
 define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
 define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -616,7 +616,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -644,7 +644,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -672,7 +672,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -700,7 +700,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -728,7 +728,7 @@ entry:
 define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -756,7 +756,7 @@ entry:
 define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -784,7 +784,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -812,7 +812,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -840,7 +840,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -868,7 +868,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -896,7 +896,7 @@ entry:
 define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -924,7 +924,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -952,7 +952,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -980,7 +980,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1008,7 +1008,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll
index 3a05a5e04c6b..2c59327afe20 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
 define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
 define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
 define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
 define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
 define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
 define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
 define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
 define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
 define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
 define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
 define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
 define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
 define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
 define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
 define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -628,7 +628,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64(
 define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -668,7 +668,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64(
 define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -708,7 +708,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64(
 define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -736,7 +736,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -764,7 +764,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -792,7 +792,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -820,7 +820,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -848,7 +848,7 @@ entry:
 define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -876,7 +876,7 @@ entry:
 define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -904,7 +904,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -932,7 +932,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -960,7 +960,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -988,7 +988,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1016,7 +1016,7 @@ entry:
 define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1044,7 +1044,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1072,7 +1072,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1100,7 +1100,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1128,7 +1128,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1156,7 +1156,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1184,7 +1184,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1212,7 +1212,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,

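Every masked-compare hunk above, and the vmsgtu hunks that follow, makes the same one-character change: the vsetvli tail policy flips from ta (tail agnostic) to tu (tail undisturbed) while the compare instruction itself is unchanged. For reference, a minimal sketch of what the two policies mean at the instruction level (illustrative only, not part of the patch; register choices are hypothetical):

  # vl elements are active; elements past vl form the tail of the result.
  vsetvli t0, a0, e8,m1,ta,mu   # ta: the implementation is free to
                                # overwrite tail elements of the destination
  vsetvli t0, a0, e8,m1,tu,mu   # tu: tail elements of the destination
                                # keep their previous contents
  vmsgt.vx v25, v16, a1, v0.t   # masked compare; with mu, elements where
                                # v0 is clear also keep the old v25 value

Under tu, whatever the caller left in the destination register survives past vl, which is the behavior the updated CHECK lines pin down.
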
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll
index 6cd44a8d2d89..8de514090cf9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
 define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
 define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
 define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
 define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
 define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
 define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
 define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
 define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
 define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
 define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
 define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
 define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
 define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
 define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
 define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -616,7 +616,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -644,7 +644,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -672,7 +672,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -700,7 +700,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -728,7 +728,7 @@ entry:
 define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -756,7 +756,7 @@ entry:
 define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -784,7 +784,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -812,7 +812,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -840,7 +840,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -868,7 +868,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -896,7 +896,7 @@ entry:
 define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -924,7 +924,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -952,7 +952,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -980,7 +980,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1008,7 +1008,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll
index ca2d4c3c2156..976629fc4c60 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
 define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
 define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
 define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
 define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
 define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
 define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
 define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
 define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
 define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
 define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
 define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
 define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
 define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
 define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
 define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -628,7 +628,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64(
 define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -668,7 +668,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64(
 define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -708,7 +708,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64(
 define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -736,7 +736,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -764,7 +764,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -792,7 +792,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -820,7 +820,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -848,7 +848,7 @@ entry:
 define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -876,7 +876,7 @@ entry:
 define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -904,7 +904,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -932,7 +932,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -960,7 +960,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -988,7 +988,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1016,7 +1016,7 @@ entry:
 define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1044,7 +1044,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1072,7 +1072,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1100,7 +1100,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1128,7 +1128,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1156,7 +1156,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1184,7 +1184,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1212,7 +1212,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,

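The vmsif hunks below again change only the tail policy, this time inside a fixed register-move sequence. For reference, the nxv1i1 sequence annotated (a sketch; the comments are explanatory and not part of the generated code):

  vmv1r.v v25, v0                # save the incoming first operand from v0
  vsetvli a0, a0, e8,mf8,tu,mu   # tail undisturbed: v25 past vl is kept
  vmv1r.v v0, v17                # install the mask operand into v0
  vmsif.m v25, v16, v0.t         # masked set-including-first writes v25
  vmv1r.v v0, v25                # move the result back into v0 to return it

With ta, the tail of v25 could legally be clobbered between the copies; tu requires it to be preserved.
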
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll
index 521a256f19de..fab8ec107964 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll
@@ -29,7 +29,7 @@ define <vscale x 1 x i1> @intrinsic_vmsif_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv1i1_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsif.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -71,7 +71,7 @@ define <vscale x 2 x i1> @intrinsic_vmsif_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv2i1_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsif.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -113,7 +113,7 @@ define <vscale x 4 x i1> @intrinsic_vmsif_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv4i1_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsif.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -155,7 +155,7 @@ define <vscale x 8 x i1> @intrinsic_vmsif_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv8i1_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsif.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -197,7 +197,7 @@ define <vscale x 16 x i1> @intrinsic_vmsif_mask_m_nxv16i1_nxv16i1(<vscale x 16 x
 ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv16i1_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsif.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -239,7 +239,7 @@ define <vscale x 32 x i1> @intrinsic_vmsif_mask_m_nxv32i1_nxv32i1(<vscale x 32 x
 ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv32i1_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsif.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -281,7 +281,7 @@ define <vscale x 64 x i1> @intrinsic_vmsif_mask_m_nxv64i1_nxv64i1(<vscale x 64 x
 ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv64i1_nxv64i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsif.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll
index 98c881cbaec3..8d6e88a0d3be 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll
@@ -29,7 +29,7 @@ define <vscale x 1 x i1> @intrinsic_vmsif_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv1i1_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsif.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -71,7 +71,7 @@ define <vscale x 2 x i1> @intrinsic_vmsif_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv2i1_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsif.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -113,7 +113,7 @@ define <vscale x 4 x i1> @intrinsic_vmsif_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv4i1_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsif.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -155,7 +155,7 @@ define <vscale x 8 x i1> @intrinsic_vmsif_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv8i1_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsif.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -197,7 +197,7 @@ define <vscale x 16 x i1> @intrinsic_vmsif_mask_m_nxv16i1_nxv16i1(<vscale x 16 x
 ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv16i1_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsif.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -239,7 +239,7 @@ define <vscale x 32 x i1> @intrinsic_vmsif_mask_m_nxv32i1_nxv32i1(<vscale x 32 x
 ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv32i1_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsif.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -281,7 +281,7 @@ define <vscale x 64 x i1> @intrinsic_vmsif_mask_m_nxv64i1_nxv64i1(<vscale x 64 x
 ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv64i1_nxv64i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsif.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

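(For reference, a decode of the vsetvli operand string whose third field the
hunks above flip; the register operands and the other fields are unchanged.
Field meanings are per the draft V spec this backend targets:)

  # vsetvli a0, a0, e8,mf8,tu,mu
  #   e8  = SEW: 8-bit element width
  #   mf8 = LMUL: 1/8 of a vector register per operand
  #   tu  = tail undisturbed: tail elements keep the destination's prior values
  #   mu  = mask undisturbed: inactive elements keep the destination's prior values
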
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll
index e86b0ef34494..ca9df4464a59 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -72,7 +72,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -116,7 +116,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -160,7 +160,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -204,7 +204,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -248,7 +248,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -292,7 +292,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -336,7 +336,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -380,7 +380,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -424,7 +424,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -512,7 +512,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -556,7 +556,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -600,7 +600,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -644,7 +644,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -688,7 +688,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -728,7 +728,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -768,7 +768,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -808,7 +808,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
 define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -848,7 +848,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
 define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -888,7 +888,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
 define <vscale x 32 x i1> @intrinsic_vmsle_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -928,7 +928,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -968,7 +968,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1008,7 +1008,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1048,7 +1048,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
 define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1088,7 +1088,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
 define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1128,7 +1128,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1168,7 +1168,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1208,7 +1208,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1248,7 +1248,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
 define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1276,7 +1276,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1304,7 +1304,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1332,7 +1332,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1360,7 +1360,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1388,7 +1388,7 @@ entry:
 define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1416,7 +1416,7 @@ entry:
 define <vscale x 32 x i1> @intrinsic_vmsle_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1444,7 +1444,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1472,7 +1472,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1500,7 +1500,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1528,7 +1528,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1556,7 +1556,7 @@ entry:
 define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1584,7 +1584,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1612,7 +1612,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1640,7 +1640,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1668,7 +1668,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,

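(All of the masked-compare tests above share one shape. As a minimal
standalone sketch, with the types and argument order taken from the nxv1i8
vector-scalar test bodies above and a hypothetical function name, the pattern
that now selects the tail-undisturbed policy is:)

  declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
    <vscale x 1 x i1>, <vscale x 1 x i8>, i8, <vscale x 1 x i1>, i32)

  define <vscale x 1 x i1> @sketch_vmsle_mask(<vscale x 1 x i1> %dst, <vscale x 1 x i8> %v, i8 %x, <vscale x 1 x i1> %m, i32 %vl) nounwind {
  entry:
    ; %dst is tied to the result, so its tail elements are user-visible;
    ; codegen now emits vsetvli a0, a0, e8,mf8,tu,mu before the vmsle.vx.
    %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
      <vscale x 1 x i1> %dst,
      <vscale x 1 x i8> %v,
      i8 %x,
      <vscale x 1 x i1> %m,
      i32 %vl)
    ret <vscale x 1 x i1> %a
  }
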
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll
index 2f33fb15d0fd..6026e9ee9c93 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -72,7 +72,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -116,7 +116,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -160,7 +160,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -204,7 +204,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -248,7 +248,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -292,7 +292,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -336,7 +336,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -380,7 +380,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -424,7 +424,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -512,7 +512,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -556,7 +556,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -600,7 +600,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -644,7 +644,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -688,7 +688,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64(
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -732,7 +732,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64(
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -776,7 +776,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64(
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -820,7 +820,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -860,7 +860,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -900,7 +900,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -940,7 +940,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
 define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -980,7 +980,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
 define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1020,7 +1020,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
 define <vscale x 32 x i1> @intrinsic_vmsle_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1060,7 +1060,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1100,7 +1100,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1140,7 +1140,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1180,7 +1180,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
 define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1220,7 +1220,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
 define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1260,7 +1260,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1300,7 +1300,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1340,7 +1340,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1380,7 +1380,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
 define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1420,7 +1420,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64(
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1460,7 +1460,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64(
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1500,7 +1500,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64(
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1528,7 +1528,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1556,7 +1556,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1584,7 +1584,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1612,7 +1612,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1640,7 +1640,7 @@ entry:
 define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1668,7 +1668,7 @@ entry:
 define <vscale x 32 x i1> @intrinsic_vmsle_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1696,7 +1696,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1724,7 +1724,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1752,7 +1752,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1780,7 +1780,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1808,7 +1808,7 @@ entry:
 define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1836,7 +1836,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1864,7 +1864,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1892,7 +1892,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1920,7 +1920,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1948,7 +1948,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1976,7 +1976,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2004,7 +2004,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll
index 7ac727aa494e..6638c0858145 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -72,7 +72,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -116,7 +116,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -160,7 +160,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -204,7 +204,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -248,7 +248,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -292,7 +292,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -336,7 +336,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -380,7 +380,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -424,7 +424,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -512,7 +512,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -556,7 +556,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -600,7 +600,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -644,7 +644,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -688,7 +688,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -728,7 +728,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -768,7 +768,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -808,7 +808,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
 define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -848,7 +848,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
 define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -888,7 +888,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
 define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -928,7 +928,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -968,7 +968,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1008,7 +1008,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1048,7 +1048,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
 define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1088,7 +1088,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
 define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1128,7 +1128,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1168,7 +1168,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1208,7 +1208,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1248,7 +1248,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
 define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1276,7 +1276,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1304,7 +1304,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1332,7 +1332,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1360,7 +1360,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1388,7 +1388,7 @@ entry:
 define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1416,7 +1416,7 @@ entry:
 define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1444,7 +1444,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1472,7 +1472,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1500,7 +1500,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1528,7 +1528,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1556,7 +1556,7 @@ entry:
 define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1584,7 +1584,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1612,7 +1612,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1640,7 +1640,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1668,7 +1668,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll
index bc91e3f98696..510646b95654 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -72,7 +72,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -116,7 +116,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -160,7 +160,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -204,7 +204,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -248,7 +248,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -292,7 +292,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -336,7 +336,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -380,7 +380,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -424,7 +424,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -512,7 +512,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -556,7 +556,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -600,7 +600,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -644,7 +644,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -688,7 +688,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64(
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -732,7 +732,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64(
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -776,7 +776,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64(
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -820,7 +820,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -860,7 +860,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -900,7 +900,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -940,7 +940,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
 define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -980,7 +980,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
 define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1020,7 +1020,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
 define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1060,7 +1060,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1100,7 +1100,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1140,7 +1140,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1180,7 +1180,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
 define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1220,7 +1220,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
 define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1260,7 +1260,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1300,7 +1300,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1340,7 +1340,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1380,7 +1380,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
 define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1420,7 +1420,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64(
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1460,7 +1460,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64(
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1500,7 +1500,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64(
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1528,7 +1528,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1556,7 +1556,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1584,7 +1584,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1612,7 +1612,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1640,7 +1640,7 @@ entry:
 define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1668,7 +1668,7 @@ entry:
 define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1696,7 +1696,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1724,7 +1724,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1752,7 +1752,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1780,7 +1780,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1808,7 +1808,7 @@ entry:
 define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1836,7 +1836,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1864,7 +1864,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1892,7 +1892,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1920,7 +1920,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1948,7 +1948,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1976,7 +1976,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2004,7 +2004,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
index 68997a6e4555..997feac5131f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -72,7 +72,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -116,7 +116,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -160,7 +160,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -204,7 +204,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -248,7 +248,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -292,7 +292,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -336,7 +336,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -380,7 +380,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -424,7 +424,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -512,7 +512,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -556,7 +556,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -600,7 +600,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -644,7 +644,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -688,7 +688,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -728,7 +728,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -768,7 +768,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -808,7 +808,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
 define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -848,7 +848,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
 define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -888,7 +888,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
 define <vscale x 32 x i1> @intrinsic_vmslt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -928,7 +928,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -968,7 +968,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1008,7 +1008,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1048,7 +1048,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
 define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1088,7 +1088,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
 define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1128,7 +1128,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1168,7 +1168,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1208,7 +1208,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1248,7 +1248,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
 define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll
index 78178c05c58b..da9797716842 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -72,7 +72,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -116,7 +116,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -160,7 +160,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -204,7 +204,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -248,7 +248,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -292,7 +292,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -336,7 +336,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -380,7 +380,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -424,7 +424,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -512,7 +512,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -556,7 +556,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -600,7 +600,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -644,7 +644,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -688,7 +688,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64(
 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -732,7 +732,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64(
 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -776,7 +776,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64(
 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -820,7 +820,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -860,7 +860,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -900,7 +900,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -940,7 +940,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
 define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -980,7 +980,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
 define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1020,7 +1020,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
 define <vscale x 32 x i1> @intrinsic_vmslt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1060,7 +1060,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1100,7 +1100,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1140,7 +1140,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1180,7 +1180,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
 define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1220,7 +1220,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
 define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1260,7 +1260,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1300,7 +1300,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1340,7 +1340,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1380,7 +1380,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
 define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1420,7 +1420,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1460,7 +1460,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1500,7 +1500,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
index 5bb1497604ed..797188edbb2f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -72,7 +72,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -116,7 +116,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -160,7 +160,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -204,7 +204,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -248,7 +248,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -292,7 +292,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -336,7 +336,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -380,7 +380,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -424,7 +424,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -512,7 +512,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -556,7 +556,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -600,7 +600,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -644,7 +644,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -688,7 +688,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
 define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -728,7 +728,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
 define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -768,7 +768,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
 define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -808,7 +808,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
 define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -848,7 +848,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
 define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -888,7 +888,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
 define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -928,7 +928,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
 define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -968,7 +968,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
 define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1008,7 +1008,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
 define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1048,7 +1048,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
 define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1088,7 +1088,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
 define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1128,7 +1128,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
 define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1168,7 +1168,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
 define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1208,7 +1208,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
 define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1248,7 +1248,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
 define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll
index e085ab569720..462173e83329 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -72,7 +72,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -116,7 +116,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -160,7 +160,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -204,7 +204,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -248,7 +248,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -292,7 +292,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -336,7 +336,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -380,7 +380,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -424,7 +424,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -512,7 +512,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -556,7 +556,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -600,7 +600,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -644,7 +644,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -688,7 +688,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64(
 define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -732,7 +732,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64(
 define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -776,7 +776,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64(
 define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -820,7 +820,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
 define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -860,7 +860,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
 define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -900,7 +900,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
 define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -940,7 +940,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
 define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -980,7 +980,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
 define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1020,7 +1020,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
 define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1060,7 +1060,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
 define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1100,7 +1100,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
 define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1140,7 +1140,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
 define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1180,7 +1180,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
 define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1220,7 +1220,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
 define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1260,7 +1260,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
 define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1300,7 +1300,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
 define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1340,7 +1340,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
 define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1380,7 +1380,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
 define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1420,7 +1420,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64(
 define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1460,7 +1460,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64(
 define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1500,7 +1500,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64(
 define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll
index 0d36d104357f..23a9a4bf66fd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -72,7 +72,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -116,7 +116,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -160,7 +160,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -204,7 +204,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -248,7 +248,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -292,7 +292,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -336,7 +336,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -380,7 +380,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -424,7 +424,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -512,7 +512,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -556,7 +556,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -600,7 +600,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -644,7 +644,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -688,7 +688,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
 define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -728,7 +728,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
 define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -768,7 +768,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
 define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -808,7 +808,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
 define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -848,7 +848,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
 define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -888,7 +888,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
 define <vscale x 32 x i1> @intrinsic_vmsne_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -928,7 +928,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
 define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -968,7 +968,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
 define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1008,7 +1008,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
 define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1048,7 +1048,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
 define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1088,7 +1088,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
 define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1128,7 +1128,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
 define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1168,7 +1168,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
 define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1208,7 +1208,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
 define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1248,7 +1248,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
 define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1276,7 +1276,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1304,7 +1304,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1332,7 +1332,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1360,7 +1360,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1388,7 +1388,7 @@ entry:
 define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1416,7 +1416,7 @@ entry:
 define <vscale x 32 x i1> @intrinsic_vmsne_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1444,7 +1444,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1472,7 +1472,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1500,7 +1500,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1528,7 +1528,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1556,7 +1556,7 @@ entry:
 define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1584,7 +1584,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1612,7 +1612,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1640,7 +1640,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1668,7 +1668,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll
index 960b9d0d03d6..765567ebb256 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -72,7 +72,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -116,7 +116,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -160,7 +160,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -204,7 +204,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -248,7 +248,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -292,7 +292,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -336,7 +336,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -380,7 +380,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -424,7 +424,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -512,7 +512,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -556,7 +556,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -600,7 +600,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -644,7 +644,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -688,7 +688,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(
 define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -732,7 +732,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64(
 define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -776,7 +776,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64(
 define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -820,7 +820,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
 define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -860,7 +860,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
 define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -900,7 +900,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
 define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -940,7 +940,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
 define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -980,7 +980,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
 define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1020,7 +1020,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
 define <vscale x 32 x i1> @intrinsic_vmsne_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1060,7 +1060,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
 define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1100,7 +1100,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
 define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1140,7 +1140,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
 define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1180,7 +1180,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
 define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1220,7 +1220,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
 define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1260,7 +1260,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
 define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1300,7 +1300,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
 define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1340,7 +1340,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
 define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1380,7 +1380,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
 define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1420,7 +1420,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
 define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1460,7 +1460,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
 define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1500,7 +1500,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
 define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1528,7 +1528,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1556,7 +1556,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1584,7 +1584,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1612,7 +1612,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1640,7 +1640,7 @@ entry:
 define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1668,7 +1668,7 @@ entry:
 define <vscale x 32 x i1> @intrinsic_vmsne_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1696,7 +1696,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1724,7 +1724,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1752,7 +1752,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1780,7 +1780,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1808,7 +1808,7 @@ entry:
 define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1836,7 +1836,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1864,7 +1864,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1892,7 +1892,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1920,7 +1920,7 @@ entry:
 define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1948,7 +1948,7 @@ entry:
 define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1976,7 +1976,7 @@ entry:
 define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2004,7 +2004,7 @@ entry:
 define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll
index c2a897197f75..3b91bc3648c3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll
@@ -29,7 +29,7 @@ define <vscale x 1 x i1> @intrinsic_vmsof_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv1i1_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsof.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -71,7 +71,7 @@ define <vscale x 2 x i1> @intrinsic_vmsof_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv2i1_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsof.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -113,7 +113,7 @@ define <vscale x 4 x i1> @intrinsic_vmsof_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv4i1_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsof.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -155,7 +155,7 @@ define <vscale x 8 x i1> @intrinsic_vmsof_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv8i1_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsof.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -197,7 +197,7 @@ define <vscale x 16 x i1> @intrinsic_vmsof_mask_m_nxv16i1_nxv16i1(<vscale x 16 x
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv16i1_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsof.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -239,7 +239,7 @@ define <vscale x 32 x i1> @intrinsic_vmsof_mask_m_nxv32i1_nxv32i1(<vscale x 32 x
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv32i1_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsof.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -281,7 +281,7 @@ define <vscale x 64 x i1> @intrinsic_vmsof_mask_m_nxv64i1_nxv64i1(<vscale x 64 x
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv64i1_nxv64i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsof.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll
index 83774254f337..7d1bdd8b6451 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll
@@ -29,7 +29,7 @@ define <vscale x 1 x i1> @intrinsic_vmsof_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv1i1_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsof.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -71,7 +71,7 @@ define <vscale x 2 x i1> @intrinsic_vmsof_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv2i1_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsof.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -113,7 +113,7 @@ define <vscale x 4 x i1> @intrinsic_vmsof_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv4i1_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsof.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -155,7 +155,7 @@ define <vscale x 8 x i1> @intrinsic_vmsof_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv8i1_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsof.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -197,7 +197,7 @@ define <vscale x 16 x i1> @intrinsic_vmsof_mask_m_nxv16i1_nxv16i1(<vscale x 16 x
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv16i1_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsof.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -239,7 +239,7 @@ define <vscale x 32 x i1> @intrinsic_vmsof_mask_m_nxv32i1_nxv32i1(<vscale x 32 x
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv32i1_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsof.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -281,7 +281,7 @@ define <vscale x 64 x i1> @intrinsic_vmsof_mask_m_nxv64i1_nxv64i1(<vscale x 64 x
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv64i1_nxv64i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v17
 ; CHECK-NEXT:    vmsof.m v25, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll
index 0b798e1c3a0b..f9eb7c3b5594 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vmul_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vmul_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vmul_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vmul_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vmul_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vmul_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vmul_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vmul_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vmul_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vmul_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vmul_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vmul_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vmul_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vmul_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vmul_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vmul_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vmul_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vmul_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vmul_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vmul_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vmul_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vmul_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vmul_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vmul_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vmul_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vmul_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vmul_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vmul_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vmul_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vmul_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vmul_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vmul_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll
index fe6e710b7ff9..e5c38c7d5e0a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vmul_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vmul_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vmul_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vmul_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vmul_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vmul_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vmul_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vmul_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vmul_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vmul_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vmul_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vmul_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vmul_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vmul_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vmul_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vmul_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vmul_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vmul_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vmul_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vmul_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vmul_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vmul_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vmul_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vmul_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vmul_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vmul_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vmul_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vmul_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vmul_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vmul_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vmul_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vmul_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vmul_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vmul_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vmul_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vmul_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vmul_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vmul_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vmul_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vmul_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll
index 3392b6a6f3bb..4e24330e4af2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vmulh_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vmulh_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vmulh_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vmulh_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vmulh_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vmulh_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vmulh_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vmulh_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vmulh_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vmulh_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vmulh_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vmulh_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vmulh_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vmulh_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vmulh_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vmulh_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vmulh_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vmulh_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vmulh_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vmulh_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vmulh_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vmulh_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vmulh_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vmulh_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vmulh_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vmulh_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vmulh_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vmulh_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vmulh_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vmulh_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vmulh_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vmulh_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vmulh_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll
index 1749aafa2702..22ce3f43264c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vmulh_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vmulh_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vmulh_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vmulh_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vmulh_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vmulh_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vmulh_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vmulh_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vmulh_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vmulh_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vmulh_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vmulh_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vmulh_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vmulh_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vmulh_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vmulh_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vmulh_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vmulh_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vmulh_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vmulh_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vmulh_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vmulh_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vmulh_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vmulh_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vmulh_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vmulh_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vmulh_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vmulh_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vmulh_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vmulh_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vmulh_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vmulh_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vmulh_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vmulh_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vmulh_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vmulh_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vmulh_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vmulh_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vmulh_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vmulh_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vmulh_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll
index aea1de9106b4..8c4ae4ba0443 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vmulhsu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vmulhsu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vmulhsu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vmulhsu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vmulhsu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vmulhsu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vmulhsu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vmulhsu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vmulhsu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vmulhsu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vmulhsu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vmulhsu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vmulhsu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vmulhsu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vmulhsu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vmulhsu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vmulhsu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vmulhsu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vmulhsu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vmulhsu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vmulhsu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vmulhsu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vmulhsu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vmulhsu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vmulhsu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vmulhsu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vmulhsu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vmulhsu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vmulhsu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vmulhsu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vmulhsu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vmulhsu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vmulhsu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll
index dfff16e90cf6..f8e24801f96d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vmulhsu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vmulhsu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vmulhsu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vmulhsu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vmulhsu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vmulhsu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vmulhsu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vmulhsu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vmulhsu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vmulhsu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vmulhsu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vmulhsu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vmulhsu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vmulhsu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vmulhsu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vmulhsu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vmulhsu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vmulhsu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vmulhsu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vmulhsu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vmulhsu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vmulhsu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vmulhsu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vmulhsu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vmulhsu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vmulhsu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vmulhsu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vmulhsu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vmulhsu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vmulhsu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vmulhsu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vmulhsu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vmulhsu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vmulhsu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vmulhsu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vmulhsu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vmulhsu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vmulhsu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vmulhsu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vmulhsu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vmulhsu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

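Each hunk above tightens the expected vsetvli for a masked vmulhsu test from ta,mu to tu,mu; the IR pattern is identical throughout, with operand %0 carrying the merge value that is tied to the destination register. A minimal self-contained version of one such test, for reference (the function name is hypothetical; the intrinsic signature matches the declarations and call sites shown above):

declare <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.i16(
  <vscale x 2 x i16>,  ; merge value, tied to the destination register
  <vscale x 2 x i16>,  ; vector source
  i16,                 ; scalar source
  <vscale x 2 x i1>,   ; mask
  i64)                 ; vl (i64 on rv64)

; Expected to select "vsetvli ..., e16,mf2,tu,mu" followed by
; "vmulhsu.vx ..., v0.t" after this change, per the CHECK lines above.
define <vscale x 2 x i16> @sketch_masked_vmulhsu(<vscale x 2 x i16> %merge, <vscale x 2 x i16> %src, i16 %s, <vscale x 2 x i1> %m, i64 %vl) nounwind {
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.i16(
    <vscale x 2 x i16> %merge,
    <vscale x 2 x i16> %src,
    i16 %s,
    <vscale x 2 x i1> %m,
    i64 %vl)
  ret <vscale x 2 x i16> %a
}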
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll
index 8997be2724c5..43726fb3097e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vmulhu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vmulhu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vmulhu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vmulhu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vmulhu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vmulhu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vmulhu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vmulhu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vmulhu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vmulhu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vmulhu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vmulhu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vmulhu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vmulhu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vmulhu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vmulhu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vmulhu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vmulhu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vmulhu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vmulhu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vmulhu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vmulhu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vmulhu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vmulhu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vmulhu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vmulhu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vmulhu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vmulhu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vmulhu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vmulhu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vmulhu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vmulhu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vmulhu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

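Note that the vmulhu-rv32.ll hunks above touch only the masked tests, whose hunks begin partway into the file; the unmasked tests before them have no merge operand and are left unchanged. A hedged sketch of that unmasked counterpart (the intrinsic name and i32 vl type are assumptions inferred from the masked declarations above, not shown in this diff):

declare <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8(
  <vscale x 1 x i8>,  ; vector source
  i8,                 ; scalar source
  i32)                ; vl (i32 on rv32)

; With no operand tied to the destination, the tail is unobservable, so
; the expected vsetvli can keep "..., e8,mf8,ta,mu" and there is no v0.t.
define <vscale x 1 x i8> @sketch_unmasked_vmulhu(<vscale x 1 x i8> %src, i8 %s, i32 %vl) nounwind {
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8(
    <vscale x 1 x i8> %src,
    i8 %s,
    i32 %vl)
  ret <vscale x 1 x i8> %a
}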
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll
index 4561f0b390da..d6280bda2e68 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vmulhu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vmulhu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vmulhu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vmulhu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vmulhu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vmulhu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vmulhu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vmulhu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vmulhu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vmulhu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vmulhu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vmulhu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vmulhu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vmulhu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vmulhu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vmulhu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vmulhu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vmulhu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vmulhu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vmulhu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vmulhu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vmulhu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vmulhu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vmulhu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vmulhu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vmulhu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vmulhu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vmulhu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vmulhu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vmulhu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vmulhu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vmulhu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vmulhu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vmulhu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vmulhu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vmulhu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vmulhu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vmulhu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vmulhu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vmulhu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vmulhu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

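The vnclip-rv32.ll hunks below repeat the same policy change for a narrowing instruction, where the tied merge operand has the narrow result type while the primary source is the wide type. A minimal sketch of that shape (hypothetical function name; the signature is taken from the declarations and call sites below):

declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
  <vscale x 1 x i8>,   ; merge value (narrow type), tied to the destination
  <vscale x 1 x i16>,  ; wide source
  <vscale x 1 x i8>,   ; shift amounts
  <vscale x 1 x i1>,   ; mask
  i32)                 ; vl (i32 on rv32)

; Expected to select "vsetvli ..., e8,mf8,tu,mu" followed by
; "vnclip.wv ..., v0.t" after this change, per the CHECK lines below.
define <vscale x 1 x i8> @sketch_masked_vnclip(<vscale x 1 x i8> %merge, <vscale x 1 x i16> %w, <vscale x 1 x i8> %shift, <vscale x 1 x i1> %m, i32 %vl) nounwind {
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
    <vscale x 1 x i8> %merge,
    <vscale x 1 x i16> %w,
    <vscale x 1 x i8> %shift,
    <vscale x 1 x i1> %m,
    i32 %vl)
  ret <vscale x 1 x i8> %a
}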
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
index e366aed945b9..cdd1672b33f1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i8(
 define <vscale x 1 x i8> @intrinsic_vnclip_mask_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv1i8_nxv1i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i8(
     <vscale x 1 x i8> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i8(
 define <vscale x 2 x i8> @intrinsic_vnclip_mask_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv2i8_nxv2i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i8(
     <vscale x 2 x i8> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i8(
 define <vscale x 4 x i8> @intrinsic_vnclip_mask_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv4i8_nxv4i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i8(
     <vscale x 4 x i8> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i8(
 define <vscale x 8 x i8> @intrinsic_vnclip_mask_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv8i8_nxv8i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i8(
     <vscale x 8 x i8> %0,
@@ -628,7 +628,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i8(
 define <vscale x 16 x i8> @intrinsic_vnclip_mask_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv16i8_nxv16i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i8(
     <vscale x 16 x i8> %0,
@@ -668,7 +668,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i8(
 define <vscale x 32 x i8> @intrinsic_vnclip_mask_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv32i8_nxv32i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i8(
     <vscale x 32 x i8> %0,
@@ -708,7 +708,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i16(
 define <vscale x 1 x i16> @intrinsic_vnclip_mask_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv1i16_nxv1i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i16(
     <vscale x 1 x i16> %0,
@@ -748,7 +748,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i16(
 define <vscale x 2 x i16> @intrinsic_vnclip_mask_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv2i16_nxv2i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i16(
     <vscale x 2 x i16> %0,
@@ -788,7 +788,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i16(
 define <vscale x 4 x i16> @intrinsic_vnclip_mask_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv4i16_nxv4i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i16(
     <vscale x 4 x i16> %0,
@@ -828,7 +828,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i16(
 define <vscale x 8 x i16> @intrinsic_vnclip_mask_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv8i16_nxv8i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i16(
     <vscale x 8 x i16> %0,
@@ -868,7 +868,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i16(
 define <vscale x 16 x i16> @intrinsic_vnclip_mask_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv16i16_nxv16i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i16(
     <vscale x 16 x i16> %0,
@@ -896,7 +896,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vnclip_mask_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv1i8_nxv1i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i8(
     <vscale x 1 x i8> %0,
@@ -924,7 +924,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vnclip_mask_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv2i8_nxv2i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i8(
     <vscale x 2 x i8> %0,
@@ -952,7 +952,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vnclip_mask_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv4i8_nxv4i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i8(
     <vscale x 4 x i8> %0,
@@ -980,7 +980,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vnclip_mask_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv8i8_nxv8i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i8(
     <vscale x 8 x i8> %0,
@@ -1008,7 +1008,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vnclip_mask_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv16i8_nxv16i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i8(
     <vscale x 16 x i8> %0,
@@ -1036,7 +1036,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vnclip_mask_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv32i8_nxv32i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i8(
     <vscale x 32 x i8> %0,
@@ -1064,7 +1064,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vnclip_mask_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv1i16_nxv1i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i16(
     <vscale x 1 x i16> %0,
@@ -1092,7 +1092,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vnclip_mask_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv2i16_nxv2i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i16(
     <vscale x 2 x i16> %0,
@@ -1120,7 +1120,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vnclip_mask_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv4i16_nxv4i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vnclip_mask_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv8i16_nxv8i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i16(
     <vscale x 8 x i16> %0,
@@ -1176,7 +1176,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vnclip_mask_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv16i16_nxv16i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i16(
     <vscale x 16 x i16> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
index e7d45d2d9353..dba1a9bc513e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i8(
 define <vscale x 1 x i8> @intrinsic_vnclip_mask_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv1i8_nxv1i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i8(
     <vscale x 1 x i8> %0,
@@ -668,7 +668,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i8(
 define <vscale x 2 x i8> @intrinsic_vnclip_mask_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv2i8_nxv2i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i8(
     <vscale x 2 x i8> %0,
@@ -708,7 +708,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i8(
 define <vscale x 4 x i8> @intrinsic_vnclip_mask_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv4i8_nxv4i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i8(
     <vscale x 4 x i8> %0,
@@ -748,7 +748,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i8(
 define <vscale x 8 x i8> @intrinsic_vnclip_mask_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv8i8_nxv8i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i8(
     <vscale x 8 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i8(
 define <vscale x 16 x i8> @intrinsic_vnclip_mask_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv16i8_nxv16i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i8(
     <vscale x 16 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i8(
 define <vscale x 32 x i8> @intrinsic_vnclip_mask_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv32i8_nxv32i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i8(
     <vscale x 32 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i16(
 define <vscale x 1 x i16> @intrinsic_vnclip_mask_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv1i16_nxv1i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i16(
     <vscale x 1 x i16> %0,
@@ -908,7 +908,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i16(
 define <vscale x 2 x i16> @intrinsic_vnclip_mask_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv2i16_nxv2i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i16(
     <vscale x 2 x i16> %0,
@@ -948,7 +948,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i16(
 define <vscale x 4 x i16> @intrinsic_vnclip_mask_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv4i16_nxv4i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i16(
     <vscale x 4 x i16> %0,
@@ -988,7 +988,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i16(
 define <vscale x 8 x i16> @intrinsic_vnclip_mask_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv8i16_nxv8i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i16(
     <vscale x 8 x i16> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i16(
 define <vscale x 16 x i16> @intrinsic_vnclip_mask_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv16i16_nxv16i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i16(
     <vscale x 16 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i32(
 define <vscale x 1 x i32> @intrinsic_vnclip_mask_wx_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv1i32_nxv1i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i32(
     <vscale x 1 x i32> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i32(
 define <vscale x 2 x i32> @intrinsic_vnclip_mask_wx_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv2i32_nxv2i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i32(
     <vscale x 2 x i32> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i32(
 define <vscale x 4 x i32> @intrinsic_vnclip_mask_wx_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv4i32_nxv4i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i32(
     <vscale x 4 x i32> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i32(
 define <vscale x 8 x i32> @intrinsic_vnclip_mask_wx_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv8i32_nxv8i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i32(
     <vscale x 8 x i32> %0,
@@ -1216,7 +1216,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vnclip_mask_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv1i8_nxv1i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i8(
     <vscale x 1 x i8> %0,
@@ -1244,7 +1244,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vnclip_mask_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv2i8_nxv2i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i8(
     <vscale x 2 x i8> %0,
@@ -1272,7 +1272,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vnclip_mask_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv4i8_nxv4i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i8(
     <vscale x 4 x i8> %0,
@@ -1300,7 +1300,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vnclip_mask_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv8i8_nxv8i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i8(
     <vscale x 8 x i8> %0,
@@ -1328,7 +1328,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vnclip_mask_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv16i8_nxv16i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i8(
     <vscale x 16 x i8> %0,
@@ -1356,7 +1356,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vnclip_mask_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv32i8_nxv32i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i8(
     <vscale x 32 x i8> %0,
@@ -1384,7 +1384,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vnclip_mask_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv1i16_nxv1i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i16(
     <vscale x 1 x i16> %0,
@@ -1412,7 +1412,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vnclip_mask_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv2i16_nxv2i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i16(
     <vscale x 2 x i16> %0,
@@ -1440,7 +1440,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vnclip_mask_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv4i16_nxv4i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i16(
     <vscale x 4 x i16> %0,
@@ -1468,7 +1468,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vnclip_mask_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv8i16_nxv8i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i16(
     <vscale x 8 x i16> %0,
@@ -1496,7 +1496,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vnclip_mask_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv16i16_nxv16i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i16(
     <vscale x 16 x i16> %0,
@@ -1524,7 +1524,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vnclip_mask_wi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv1i32_nxv1i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i32(
     <vscale x 1 x i32> %0,
@@ -1552,7 +1552,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vnclip_mask_wi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv2i32_nxv2i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i32(
     <vscale x 2 x i32> %0,
@@ -1580,7 +1580,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vnclip_mask_wi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv4i32_nxv4i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i32(
     <vscale x 4 x i32> %0,
@@ -1608,7 +1608,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vnclip_mask_wi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv8i32_nxv8i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i32(
     <vscale x 8 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
index d7e957f86e98..52241c591466 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i8(
 define <vscale x 1 x i8> @intrinsic_vnclipu_mask_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv1i8_nxv1i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i8(
     <vscale x 1 x i8> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i8(
 define <vscale x 2 x i8> @intrinsic_vnclipu_mask_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv2i8_nxv2i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i8(
     <vscale x 2 x i8> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i8(
 define <vscale x 4 x i8> @intrinsic_vnclipu_mask_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv4i8_nxv4i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i8(
     <vscale x 4 x i8> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i8(
 define <vscale x 8 x i8> @intrinsic_vnclipu_mask_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv8i8_nxv8i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i8(
     <vscale x 8 x i8> %0,
@@ -628,7 +628,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i8(
 define <vscale x 16 x i8> @intrinsic_vnclipu_mask_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv16i8_nxv16i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i8(
     <vscale x 16 x i8> %0,
@@ -668,7 +668,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i8(
 define <vscale x 32 x i8> @intrinsic_vnclipu_mask_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv32i8_nxv32i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i8(
     <vscale x 32 x i8> %0,
@@ -708,7 +708,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i16(
 define <vscale x 1 x i16> @intrinsic_vnclipu_mask_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv1i16_nxv1i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i16(
     <vscale x 1 x i16> %0,
@@ -748,7 +748,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i16(
 define <vscale x 2 x i16> @intrinsic_vnclipu_mask_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv2i16_nxv2i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i16(
     <vscale x 2 x i16> %0,
@@ -788,7 +788,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i16(
 define <vscale x 4 x i16> @intrinsic_vnclipu_mask_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv4i16_nxv4i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i16(
     <vscale x 4 x i16> %0,
@@ -828,7 +828,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i16(
 define <vscale x 8 x i16> @intrinsic_vnclipu_mask_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv8i16_nxv8i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i16(
     <vscale x 8 x i16> %0,
@@ -868,7 +868,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i16(
 define <vscale x 16 x i16> @intrinsic_vnclipu_mask_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv16i16_nxv16i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i16(
     <vscale x 16 x i16> %0,
@@ -896,7 +896,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vnclipu_mask_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv1i8_nxv1i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i8(
     <vscale x 1 x i8> %0,
@@ -924,7 +924,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vnclipu_mask_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv2i8_nxv2i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i8(
     <vscale x 2 x i8> %0,
@@ -952,7 +952,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vnclipu_mask_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv4i8_nxv4i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i8(
     <vscale x 4 x i8> %0,
@@ -980,7 +980,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vnclipu_mask_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv8i8_nxv8i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i8(
     <vscale x 8 x i8> %0,
@@ -1008,7 +1008,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vnclipu_mask_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv16i8_nxv16i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i8(
     <vscale x 16 x i8> %0,
@@ -1036,7 +1036,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vnclipu_mask_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv32i8_nxv32i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i8(
     <vscale x 32 x i8> %0,
@@ -1064,7 +1064,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vnclipu_mask_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv1i16_nxv1i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i16(
     <vscale x 1 x i16> %0,
@@ -1092,7 +1092,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vnclipu_mask_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv2i16_nxv2i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i16(
     <vscale x 2 x i16> %0,
@@ -1120,7 +1120,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vnclipu_mask_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv4i16_nxv4i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vnclipu_mask_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv8i16_nxv8i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i16(
     <vscale x 8 x i16> %0,
@@ -1176,7 +1176,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vnclipu_mask_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv16i16_nxv16i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i16(
     <vscale x 16 x i16> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
index fe635213e06f..ec649b52abfd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i8(
 define <vscale x 1 x i8> @intrinsic_vnclipu_mask_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv1i8_nxv1i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i8(
     <vscale x 1 x i8> %0,
@@ -668,7 +668,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i8(
 define <vscale x 2 x i8> @intrinsic_vnclipu_mask_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv2i8_nxv2i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i8(
     <vscale x 2 x i8> %0,
@@ -708,7 +708,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i8(
 define <vscale x 4 x i8> @intrinsic_vnclipu_mask_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv4i8_nxv4i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i8(
     <vscale x 4 x i8> %0,
@@ -748,7 +748,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i8(
 define <vscale x 8 x i8> @intrinsic_vnclipu_mask_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv8i8_nxv8i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i8(
     <vscale x 8 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i8(
 define <vscale x 16 x i8> @intrinsic_vnclipu_mask_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv16i8_nxv16i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i8(
     <vscale x 16 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i8(
 define <vscale x 32 x i8> @intrinsic_vnclipu_mask_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv32i8_nxv32i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i8(
     <vscale x 32 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i16(
 define <vscale x 1 x i16> @intrinsic_vnclipu_mask_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv1i16_nxv1i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i16(
     <vscale x 1 x i16> %0,
@@ -908,7 +908,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i16(
 define <vscale x 2 x i16> @intrinsic_vnclipu_mask_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv2i16_nxv2i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i16(
     <vscale x 2 x i16> %0,
@@ -948,7 +948,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i16(
 define <vscale x 4 x i16> @intrinsic_vnclipu_mask_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv4i16_nxv4i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i16(
     <vscale x 4 x i16> %0,
@@ -988,7 +988,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i16(
 define <vscale x 8 x i16> @intrinsic_vnclipu_mask_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv8i16_nxv8i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i16(
     <vscale x 8 x i16> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i16(
 define <vscale x 16 x i16> @intrinsic_vnclipu_mask_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv16i16_nxv16i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i16(
     <vscale x 16 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i32(
 define <vscale x 1 x i32> @intrinsic_vnclipu_mask_wx_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv1i32_nxv1i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i32(
     <vscale x 1 x i32> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i32(
 define <vscale x 2 x i32> @intrinsic_vnclipu_mask_wx_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv2i32_nxv2i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i32(
     <vscale x 2 x i32> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i32(
 define <vscale x 4 x i32> @intrinsic_vnclipu_mask_wx_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv4i32_nxv4i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i32(
     <vscale x 4 x i32> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i32(
 define <vscale x 8 x i32> @intrinsic_vnclipu_mask_wx_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv8i32_nxv8i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i32(
     <vscale x 8 x i32> %0,
@@ -1216,7 +1216,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vnclipu_mask_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv1i8_nxv1i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i8(
     <vscale x 1 x i8> %0,
@@ -1244,7 +1244,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vnclipu_mask_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv2i8_nxv2i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i8(
     <vscale x 2 x i8> %0,
@@ -1272,7 +1272,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vnclipu_mask_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv4i8_nxv4i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i8(
     <vscale x 4 x i8> %0,
@@ -1300,7 +1300,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vnclipu_mask_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv8i8_nxv8i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i8(
     <vscale x 8 x i8> %0,
@@ -1328,7 +1328,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vnclipu_mask_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv16i8_nxv16i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i8(
     <vscale x 16 x i8> %0,
@@ -1356,7 +1356,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vnclipu_mask_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv32i8_nxv32i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i8(
     <vscale x 32 x i8> %0,
@@ -1384,7 +1384,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vnclipu_mask_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv1i16_nxv1i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i16(
     <vscale x 1 x i16> %0,
@@ -1412,7 +1412,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vnclipu_mask_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv2i16_nxv2i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i16(
     <vscale x 2 x i16> %0,
@@ -1440,7 +1440,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vnclipu_mask_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv4i16_nxv4i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i16(
     <vscale x 4 x i16> %0,
@@ -1468,7 +1468,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vnclipu_mask_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv8i16_nxv8i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i16(
     <vscale x 8 x i16> %0,
@@ -1496,7 +1496,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vnclipu_mask_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv16i16_nxv16i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i16(
     <vscale x 16 x i16> %0,
@@ -1524,7 +1524,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vnclipu_mask_wi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv1i32_nxv1i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i32(
     <vscale x 1 x i32> %0,
@@ -1552,7 +1552,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vnclipu_mask_wi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv2i32_nxv2i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i32(
     <vscale x 2 x i32> %0,
@@ -1580,7 +1580,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vnclipu_mask_wi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv4i32_nxv4i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i32(
     <vscale x 4 x i32> %0,
@@ -1608,7 +1608,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vnclipu_mask_wi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv8i32_nxv8i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i32(
     <vscale x 8 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll
index e6997482a870..1e99fc9643f8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll
@@ -9,7 +9,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8(
 define <vscale x 1 x i8>  @intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -30,7 +30,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8>  @intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -51,7 +51,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8(
 define <vscale x 2 x i8>  @intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -72,7 +72,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8>  @intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -93,7 +93,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8(
 define <vscale x 4 x i8>  @intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -114,7 +114,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8>  @intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -135,7 +135,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8(
 define <vscale x 8 x i8>  @intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -156,7 +156,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8>  @intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -177,7 +177,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8(
 define <vscale x 16 x i8>  @intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -198,7 +198,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8>  @intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -219,7 +219,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8(
 define <vscale x 32 x i8>  @intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -240,7 +240,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8>  @intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -261,7 +261,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16(
 define <vscale x 1 x i16>  @intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -282,7 +282,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16>  @intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -303,7 +303,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16(
 define <vscale x 2 x i16>  @intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -324,7 +324,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16>  @intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -345,7 +345,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16(
 define <vscale x 4 x i16>  @intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -366,7 +366,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16>  @intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -387,7 +387,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16(
 define <vscale x 8 x i16>  @intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -408,7 +408,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16>  @intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -429,7 +429,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16(
 define <vscale x 16 x i16>  @intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -450,7 +450,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16>  @intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -471,7 +471,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32(
 define <vscale x 1 x i32>  @intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -492,7 +492,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32>  @intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -513,7 +513,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32(
 define <vscale x 2 x i32>  @intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -534,7 +534,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32>  @intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -555,7 +555,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32(
 define <vscale x 4 x i32>  @intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -576,7 +576,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32>  @intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -597,7 +597,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32(
 define <vscale x 8 x i32>  @intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -618,7 +618,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32>  @intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -639,7 +639,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8(
 define <vscale x 1 x i8>  @intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -660,7 +660,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -681,7 +681,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8(
 define <vscale x 2 x i8>  @intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -702,7 +702,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -723,7 +723,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8(
 define <vscale x 4 x i8>  @intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -744,7 +744,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -765,7 +765,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8(
 define <vscale x 8 x i8>  @intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -786,7 +786,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -807,7 +807,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8(
 define <vscale x 16 x i8>  @intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -849,7 +849,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8(
 define <vscale x 32 x i8>  @intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -870,7 +870,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -891,7 +891,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16(
 define <vscale x 1 x i16>  @intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -912,7 +912,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -933,7 +933,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16(
 define <vscale x 2 x i16>  @intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -954,7 +954,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -975,7 +975,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16(
 define <vscale x 4 x i16>  @intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -996,7 +996,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1017,7 +1017,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16(
 define <vscale x 8 x i16>  @intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1038,7 +1038,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1059,7 +1059,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16(
 define <vscale x 16 x i16>  @intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1080,7 +1080,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1101,7 +1101,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32(
 define <vscale x 1 x i32>  @intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1122,7 +1122,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1143,7 +1143,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32(
 define <vscale x 2 x i32>  @intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1164,7 +1164,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1185,7 +1185,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32(
 define <vscale x 4 x i32>  @intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1206,7 +1206,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1227,7 +1227,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32(
 define <vscale x 8 x i32>  @intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1248,7 +1248,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,

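The hunks above cover every element width and LMUL for vnmsac on RV32, and all of them trade the tail agnostic vsetvli for a tail undisturbed one. As a minimal sketch of why (assuming nothing beyond the nxv1i8 form declared in these tests; the @sketch_vnmsac name is hypothetical, for illustration only): the first operand of the intrinsic is both the accumulator input and the tied destination register, so the caller can observe that register's tail elements and the lowering must preserve them.

declare <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8(
  <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32)

; vnmsac roughly computes vd[i] = -(vs1[i] * vs2[i]) + vd[i], overwriting the
; addend. Elements of %acc beyond %vl flow through to %a unchanged, which is
; exactly what the updated CHECK lines encode:
;   vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
;   vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
define <vscale x 1 x i8> @sketch_vnmsac(<vscale x 1 x i8> %acc, <vscale x 1 x i8> %x, <vscale x 1 x i8> %y, i32 %vl) nounwind {
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %acc,
    <vscale x 1 x i8> %x,
    <vscale x 1 x i8> %y,
    i32 %vl)
  ret <vscale x 1 x i8> %a
}
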
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll
index d79c4f6deeff..bef5fca7778c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll
@@ -9,7 +9,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8(
 define <vscale x 1 x i8>  @intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -30,7 +30,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8>  @intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -51,7 +51,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8(
 define <vscale x 2 x i8>  @intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -72,7 +72,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8>  @intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -93,7 +93,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8(
 define <vscale x 4 x i8>  @intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -114,7 +114,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8>  @intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -135,7 +135,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8(
 define <vscale x 8 x i8>  @intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -156,7 +156,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8>  @intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -177,7 +177,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8(
 define <vscale x 16 x i8>  @intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -198,7 +198,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8>  @intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -219,7 +219,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8(
 define <vscale x 32 x i8>  @intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -240,7 +240,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8>  @intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -261,7 +261,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16(
 define <vscale x 1 x i16>  @intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -282,7 +282,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16>  @intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -303,7 +303,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16(
 define <vscale x 2 x i16>  @intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -324,7 +324,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16>  @intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -345,7 +345,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16(
 define <vscale x 4 x i16>  @intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -366,7 +366,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16>  @intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -387,7 +387,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16(
 define <vscale x 8 x i16>  @intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -408,7 +408,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16>  @intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -429,7 +429,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16(
 define <vscale x 16 x i16>  @intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -450,7 +450,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16>  @intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -471,7 +471,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32(
 define <vscale x 1 x i32>  @intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -492,7 +492,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32>  @intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -513,7 +513,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32(
 define <vscale x 2 x i32>  @intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -534,7 +534,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32>  @intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -555,7 +555,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32(
 define <vscale x 4 x i32>  @intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -576,7 +576,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32>  @intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -597,7 +597,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32(
 define <vscale x 8 x i32>  @intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -618,7 +618,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32>  @intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -639,7 +639,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64(
 define <vscale x 1 x i64>  @intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -660,7 +660,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64>  @intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -681,7 +681,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64(
 define <vscale x 2 x i64>  @intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -702,7 +702,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64>  @intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -723,7 +723,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64(
 define <vscale x 4 x i64>  @intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -744,7 +744,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64>  @intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vnmsac.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -765,7 +765,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8(
 define <vscale x 1 x i8>  @intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -786,7 +786,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -807,7 +807,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8(
 define <vscale x 2 x i8>  @intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -849,7 +849,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8(
 define <vscale x 4 x i8>  @intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -870,7 +870,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -891,7 +891,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8(
 define <vscale x 8 x i8>  @intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -912,7 +912,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -933,7 +933,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8(
 define <vscale x 16 x i8>  @intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -954,7 +954,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -975,7 +975,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8(
 define <vscale x 32 x i8>  @intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -996,7 +996,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1017,7 +1017,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16(
 define <vscale x 1 x i16>  @intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1038,7 +1038,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1059,7 +1059,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16(
 define <vscale x 2 x i16>  @intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1080,7 +1080,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1101,7 +1101,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16(
 define <vscale x 4 x i16>  @intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1122,7 +1122,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1143,7 +1143,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16(
 define <vscale x 8 x i16>  @intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1164,7 +1164,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1185,7 +1185,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16(
 define <vscale x 16 x i16>  @intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1206,7 +1206,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1227,7 +1227,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32(
 define <vscale x 1 x i32>  @intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1248,7 +1248,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1269,7 +1269,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32(
 define <vscale x 2 x i32>  @intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1290,7 +1290,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1311,7 +1311,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32(
 define <vscale x 4 x i32>  @intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1332,7 +1332,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1353,7 +1353,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32(
 define <vscale x 8 x i32>  @intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1374,7 +1374,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1395,7 +1395,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64(
 define <vscale x 1 x i64>  @intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1416,7 +1416,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1437,7 +1437,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64(
 define <vscale x 2 x i64>  @intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1458,7 +1458,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1479,7 +1479,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64(
 define <vscale x 4 x i64>  @intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1500,7 +1500,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,

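For readers skimming the hunks: every change in these files checks the same property. A minimal standalone sketch for the unmasked vnmsac.vx case (it reuses the intrinsic signature declared in the tests above; the function name is illustrative only, not from the patch):

; Sketch: the destination of vnmsac is tied to its first source operand,
; and the tests above now expect a tail-undisturbed (tu) vsetvli for it.
declare <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64(
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i64>,
  i64)

define <vscale x 1 x i64> @sketch_vnmsac_vx(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
entry:
; a CHECK here would match: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
; a CHECK here would match: vnmsac.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
  %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 %1,
    <vscale x 1 x i64> %2,
    i64 %3)
  ret <vscale x 1 x i64> %a
}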
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll
index 3c01f60e9df5..0481f3614781 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll
@@ -9,7 +9,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8(
 define <vscale x 1 x i8>  @intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -30,7 +30,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8>  @intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -51,7 +51,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8(
 define <vscale x 2 x i8>  @intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -72,7 +72,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8>  @intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -93,7 +93,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8(
 define <vscale x 4 x i8>  @intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -114,7 +114,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8>  @intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -135,7 +135,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8(
 define <vscale x 8 x i8>  @intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -156,7 +156,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8>  @intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -177,7 +177,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8(
 define <vscale x 16 x i8>  @intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -198,7 +198,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8>  @intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -219,7 +219,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8(
 define <vscale x 32 x i8>  @intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -240,7 +240,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8>  @intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -261,7 +261,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16(
 define <vscale x 1 x i16>  @intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -282,7 +282,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16>  @intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -303,7 +303,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16(
 define <vscale x 2 x i16>  @intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -324,7 +324,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16>  @intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -345,7 +345,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16(
 define <vscale x 4 x i16>  @intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -366,7 +366,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16>  @intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -387,7 +387,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16(
 define <vscale x 8 x i16>  @intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -408,7 +408,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16>  @intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -429,7 +429,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16(
 define <vscale x 16 x i16>  @intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -450,7 +450,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16>  @intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -471,7 +471,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32(
 define <vscale x 1 x i32>  @intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -492,7 +492,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32>  @intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -513,7 +513,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32(
 define <vscale x 2 x i32>  @intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -534,7 +534,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32>  @intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -555,7 +555,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32(
 define <vscale x 4 x i32>  @intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -576,7 +576,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32>  @intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -597,7 +597,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32(
 define <vscale x 8 x i32>  @intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -618,7 +618,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32>  @intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -639,7 +639,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8(
 define <vscale x 1 x i8>  @intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -660,7 +660,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -681,7 +681,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8(
 define <vscale x 2 x i8>  @intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -702,7 +702,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -723,7 +723,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8(
 define <vscale x 4 x i8>  @intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -744,7 +744,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -765,7 +765,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8(
 define <vscale x 8 x i8>  @intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -786,7 +786,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -807,7 +807,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8(
 define <vscale x 16 x i8>  @intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -849,7 +849,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8(
 define <vscale x 32 x i8>  @intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -870,7 +870,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -891,7 +891,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16(
 define <vscale x 1 x i16>  @intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -912,7 +912,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -933,7 +933,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16(
 define <vscale x 2 x i16>  @intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -954,7 +954,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -975,7 +975,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16(
 define <vscale x 4 x i16>  @intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -996,7 +996,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1017,7 +1017,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16(
 define <vscale x 8 x i16>  @intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1038,7 +1038,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1059,7 +1059,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16(
 define <vscale x 16 x i16>  @intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1080,7 +1080,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1101,7 +1101,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32(
 define <vscale x 1 x i32>  @intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1122,7 +1122,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1143,7 +1143,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32(
 define <vscale x 2 x i32>  @intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1164,7 +1164,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1185,7 +1185,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32(
 define <vscale x 4 x i32>  @intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1206,7 +1206,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1227,7 +1227,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32(
 define <vscale x 8 x i32>  @intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1248,7 +1248,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll
index dd9d6ec2280b..39832c5786c9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll
@@ -9,7 +9,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8(
 define <vscale x 1 x i8>  @intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -30,7 +30,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8>  @intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -51,7 +51,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8(
 define <vscale x 2 x i8>  @intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -72,7 +72,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8>  @intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -93,7 +93,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8(
 define <vscale x 4 x i8>  @intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -114,7 +114,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8>  @intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -135,7 +135,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8(
 define <vscale x 8 x i8>  @intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -156,7 +156,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8>  @intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -177,7 +177,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8(
 define <vscale x 16 x i8>  @intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -198,7 +198,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8>  @intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -219,7 +219,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8(
 define <vscale x 32 x i8>  @intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -240,7 +240,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8>  @intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -261,7 +261,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16(
 define <vscale x 1 x i16>  @intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -282,7 +282,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16>  @intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -303,7 +303,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16(
 define <vscale x 2 x i16>  @intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -324,7 +324,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16>  @intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -345,7 +345,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16(
 define <vscale x 4 x i16>  @intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -366,7 +366,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16>  @intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -387,7 +387,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16(
 define <vscale x 8 x i16>  @intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -408,7 +408,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16>  @intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -429,7 +429,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16(
 define <vscale x 16 x i16>  @intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -450,7 +450,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16>  @intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -471,7 +471,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32(
 define <vscale x 1 x i32>  @intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -492,7 +492,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32>  @intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -513,7 +513,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32(
 define <vscale x 2 x i32>  @intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -534,7 +534,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32>  @intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -555,7 +555,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32(
 define <vscale x 4 x i32>  @intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -576,7 +576,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32>  @intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -597,7 +597,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32(
 define <vscale x 8 x i32>  @intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -618,7 +618,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32>  @intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -639,7 +639,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64(
 define <vscale x 1 x i64>  @intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -660,7 +660,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64>  @intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -681,7 +681,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64(
 define <vscale x 2 x i64>  @intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -702,7 +702,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64>  @intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -723,7 +723,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64(
 define <vscale x 4 x i64>  @intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -744,7 +744,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64>  @intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vnmsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -765,7 +765,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8(
 define <vscale x 1 x i8>  @intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -786,7 +786,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -807,7 +807,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8(
 define <vscale x 2 x i8>  @intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -849,7 +849,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8(
 define <vscale x 4 x i8>  @intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -870,7 +870,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -891,7 +891,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8(
 define <vscale x 8 x i8>  @intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -912,7 +912,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -933,7 +933,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8(
 define <vscale x 16 x i8>  @intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -954,7 +954,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -975,7 +975,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8(
 define <vscale x 32 x i8>  @intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -996,7 +996,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1017,7 +1017,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16(
 define <vscale x 1 x i16>  @intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1038,7 +1038,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1059,7 +1059,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16(
 define <vscale x 2 x i16>  @intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1080,7 +1080,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1101,7 +1101,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16(
 define <vscale x 4 x i16>  @intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1122,7 +1122,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1143,7 +1143,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16(
 define <vscale x 8 x i16>  @intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1164,7 +1164,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1185,7 +1185,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16(
 define <vscale x 16 x i16>  @intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1206,7 +1206,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1227,7 +1227,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32(
 define <vscale x 1 x i32>  @intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1248,7 +1248,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1269,7 +1269,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32(
 define <vscale x 2 x i32>  @intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1290,7 +1290,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1311,7 +1311,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32(
 define <vscale x 4 x i32>  @intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1332,7 +1332,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1353,7 +1353,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32(
 define <vscale x 8 x i32>  @intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1374,7 +1374,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1395,7 +1395,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64(
 define <vscale x 1 x i64>  @intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1416,7 +1416,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1437,7 +1437,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64(
 define <vscale x 2 x i64>  @intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1458,7 +1458,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1479,7 +1479,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64(
 define <vscale x 4 x i64>  @intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1500,7 +1500,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vnmsub.vx {{v[0-9]+}}, a0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll
index 3e94789913f0..d46b04fabe0d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vnsra_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv2i8_nxv2i16_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vnsra_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv4i8_nxv4i16_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vnsra_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv8i8_nxv8i16_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vnsra_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv16i8_nxv16i16_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vnsra_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv32i8_nxv32i16_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vnsra_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i16_nxv1i32_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vnsra_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv2i16_nxv2i32_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vnsra_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv4i16_nxv4i32_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vnsra_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv8i16_nxv8i32_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vnsra_mask_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv16i16_nxv16i32_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i8(
 define <vscale x 1 x i8> @intrinsic_vnsra_mask_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv1i8_nxv1i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i8(
     <vscale x 1 x i8> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i8(
 define <vscale x 2 x i8> @intrinsic_vnsra_mask_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv2i8_nxv2i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i8(
     <vscale x 2 x i8> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i8(
 define <vscale x 4 x i8> @intrinsic_vnsra_mask_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv4i8_nxv4i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i8(
     <vscale x 4 x i8> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i8(
 define <vscale x 8 x i8> @intrinsic_vnsra_mask_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv8i8_nxv8i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i8(
     <vscale x 8 x i8> %0,
@@ -628,7 +628,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i8(
 define <vscale x 16 x i8> @intrinsic_vnsra_mask_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv16i8_nxv16i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i8(
     <vscale x 16 x i8> %0,
@@ -668,7 +668,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i8(
 define <vscale x 32 x i8> @intrinsic_vnsra_mask_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv32i8_nxv32i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i8(
     <vscale x 32 x i8> %0,
@@ -708,7 +708,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i16(
 define <vscale x 1 x i16> @intrinsic_vnsra_mask_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv1i16_nxv1i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i16(
     <vscale x 1 x i16> %0,
@@ -748,7 +748,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i16(
 define <vscale x 2 x i16> @intrinsic_vnsra_mask_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv2i16_nxv2i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i16(
     <vscale x 2 x i16> %0,
@@ -788,7 +788,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i16(
 define <vscale x 4 x i16> @intrinsic_vnsra_mask_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv4i16_nxv4i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i16(
     <vscale x 4 x i16> %0,
@@ -828,7 +828,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i16(
 define <vscale x 8 x i16> @intrinsic_vnsra_mask_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv8i16_nxv8i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i16(
     <vscale x 8 x i16> %0,
@@ -868,7 +868,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i16(
 define <vscale x 16 x i16> @intrinsic_vnsra_mask_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv16i16_nxv16i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i16(
     <vscale x 16 x i16> %0,
@@ -896,7 +896,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vnsra_mask_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv1i8_nxv1i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i8(
     <vscale x 1 x i8> %0,
@@ -924,7 +924,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vnsra_mask_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv2i8_nxv2i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i8(
     <vscale x 2 x i8> %0,
@@ -952,7 +952,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vnsra_mask_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv4i8_nxv4i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i8(
     <vscale x 4 x i8> %0,
@@ -980,7 +980,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vnsra_mask_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv8i8_nxv8i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i8(
     <vscale x 8 x i8> %0,
@@ -1008,7 +1008,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vnsra_mask_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv16i8_nxv16i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i8(
     <vscale x 16 x i8> %0,
@@ -1036,7 +1036,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vnsra_mask_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv32i8_nxv32i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i8(
     <vscale x 32 x i8> %0,
@@ -1064,7 +1064,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vnsra_mask_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv1i16_nxv1i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i16(
     <vscale x 1 x i16> %0,
@@ -1092,7 +1092,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vnsra_mask_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv2i16_nxv2i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i16(
     <vscale x 2 x i16> %0,
@@ -1120,7 +1120,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vnsra_mask_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv4i16_nxv4i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vnsra_mask_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv8i16_nxv8i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i16(
     <vscale x 8 x i16> %0,
@@ -1176,7 +1176,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vnsra_mask_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv16i16_nxv16i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i16(
     <vscale x 16 x i16> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll
index f2c8e641a77d..406e963b8519 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vnsra_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv2i8_nxv2i16_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vnsra_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv4i8_nxv4i16_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vnsra_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv8i8_nxv8i16_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vnsra_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv16i8_nxv16i16_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vnsra_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv32i8_nxv32i16_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vnsra_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i16_nxv1i32_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vnsra_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv2i16_nxv2i32_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vnsra_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv4i16_nxv4i32_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vnsra_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv8i16_nxv8i32_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vnsra_mask_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv16i16_nxv16i32_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vnsra_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i32_nxv1i64_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vnsra_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv2i32_nxv2i64_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vnsra_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv4i32_nxv4i64_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vnsra_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv8i32_nxv8i64_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vnsra.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i8(
 define <vscale x 1 x i8> @intrinsic_vnsra_mask_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv1i8_nxv1i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i8(
     <vscale x 1 x i8> %0,
@@ -668,7 +668,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i8(
 define <vscale x 2 x i8> @intrinsic_vnsra_mask_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv2i8_nxv2i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i8(
     <vscale x 2 x i8> %0,
@@ -708,7 +708,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i8(
 define <vscale x 4 x i8> @intrinsic_vnsra_mask_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv4i8_nxv4i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i8(
     <vscale x 4 x i8> %0,
@@ -748,7 +748,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i8(
 define <vscale x 8 x i8> @intrinsic_vnsra_mask_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv8i8_nxv8i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i8(
     <vscale x 8 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i8(
 define <vscale x 16 x i8> @intrinsic_vnsra_mask_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv16i8_nxv16i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i8(
     <vscale x 16 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i8(
 define <vscale x 32 x i8> @intrinsic_vnsra_mask_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv32i8_nxv32i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i8(
     <vscale x 32 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i16(
 define <vscale x 1 x i16> @intrinsic_vnsra_mask_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv1i16_nxv1i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i16(
     <vscale x 1 x i16> %0,
@@ -908,7 +908,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i16(
 define <vscale x 2 x i16> @intrinsic_vnsra_mask_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv2i16_nxv2i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i16(
     <vscale x 2 x i16> %0,
@@ -948,7 +948,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i16(
 define <vscale x 4 x i16> @intrinsic_vnsra_mask_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv4i16_nxv4i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i16(
     <vscale x 4 x i16> %0,
@@ -988,7 +988,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i16(
 define <vscale x 8 x i16> @intrinsic_vnsra_mask_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv8i16_nxv8i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i16(
     <vscale x 8 x i16> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i16(
 define <vscale x 16 x i16> @intrinsic_vnsra_mask_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv16i16_nxv16i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i16(
     <vscale x 16 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i32(
 define <vscale x 1 x i32> @intrinsic_vnsra_mask_wx_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv1i32_nxv1i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i32(
     <vscale x 1 x i32> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i32(
 define <vscale x 2 x i32> @intrinsic_vnsra_mask_wx_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv2i32_nxv2i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i32(
     <vscale x 2 x i32> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i32(
 define <vscale x 4 x i32> @intrinsic_vnsra_mask_wx_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv4i32_nxv4i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i32(
     <vscale x 4 x i32> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i32(
 define <vscale x 8 x i32> @intrinsic_vnsra_mask_wx_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wx_nxv8i32_nxv8i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vnsra.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i32(
     <vscale x 8 x i32> %0,
@@ -1216,7 +1216,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vnsra_mask_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv1i8_nxv1i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i8(
     <vscale x 1 x i8> %0,
@@ -1244,7 +1244,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vnsra_mask_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv2i8_nxv2i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i8(
     <vscale x 2 x i8> %0,
@@ -1272,7 +1272,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vnsra_mask_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv4i8_nxv4i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i8(
     <vscale x 4 x i8> %0,
@@ -1300,7 +1300,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vnsra_mask_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv8i8_nxv8i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i8(
     <vscale x 8 x i8> %0,
@@ -1328,7 +1328,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vnsra_mask_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv16i8_nxv16i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i8(
     <vscale x 16 x i8> %0,
@@ -1356,7 +1356,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vnsra_mask_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv32i8_nxv32i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i8(
     <vscale x 32 x i8> %0,
@@ -1384,7 +1384,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vnsra_mask_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv1i16_nxv1i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i16(
     <vscale x 1 x i16> %0,
@@ -1412,7 +1412,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vnsra_mask_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv2i16_nxv2i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i16(
     <vscale x 2 x i16> %0,
@@ -1440,7 +1440,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vnsra_mask_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv4i16_nxv4i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i16(
     <vscale x 4 x i16> %0,
@@ -1468,7 +1468,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vnsra_mask_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv8i16_nxv8i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i16(
     <vscale x 8 x i16> %0,
@@ -1496,7 +1496,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vnsra_mask_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv16i16_nxv16i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i16(
     <vscale x 16 x i16> %0,
@@ -1524,7 +1524,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vnsra_mask_wi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv1i32_nxv1i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i32(
     <vscale x 1 x i32> %0,
@@ -1552,7 +1552,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vnsra_mask_wi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv2i32_nxv2i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i32(
     <vscale x 2 x i32> %0,
@@ -1580,7 +1580,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vnsra_mask_wi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv4i32_nxv4i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i32(
     <vscale x 4 x i32> %0,
@@ -1608,7 +1608,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vnsra_mask_wi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsra_mask_wi_nxv8i32_nxv8i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vnsra.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i32(
     <vscale x 8 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll
index 974f002751ea..a7f58b83139e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vnsrl_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i8_nxv1i16_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vnsrl_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i8_nxv2i16_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vnsrl_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i8_nxv4i16_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vnsrl_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i8_nxv8i16_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vnsrl_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv16i8_nxv16i16_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vnsrl_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv32i8_nxv32i16_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vnsrl_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i16_nxv1i32_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vnsrl_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i16_nxv2i32_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vnsrl_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i16_nxv4i32_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vnsrl_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i16_nxv8i32_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vnsrl_mask_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv16i16_nxv16i32_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i8(
 define <vscale x 1 x i8> @intrinsic_vnsrl_mask_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv1i8_nxv1i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i8(
     <vscale x 1 x i8> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i8(
 define <vscale x 2 x i8> @intrinsic_vnsrl_mask_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv2i8_nxv2i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i8(
     <vscale x 2 x i8> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i8(
 define <vscale x 4 x i8> @intrinsic_vnsrl_mask_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv4i8_nxv4i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i8(
     <vscale x 4 x i8> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i8(
 define <vscale x 8 x i8> @intrinsic_vnsrl_mask_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv8i8_nxv8i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i8(
     <vscale x 8 x i8> %0,
@@ -628,7 +628,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i8(
 define <vscale x 16 x i8> @intrinsic_vnsrl_mask_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv16i8_nxv16i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i8(
     <vscale x 16 x i8> %0,
@@ -668,7 +668,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i8(
 define <vscale x 32 x i8> @intrinsic_vnsrl_mask_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv32i8_nxv32i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i8(
     <vscale x 32 x i8> %0,
@@ -708,7 +708,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i16(
 define <vscale x 1 x i16> @intrinsic_vnsrl_mask_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv1i16_nxv1i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i16(
     <vscale x 1 x i16> %0,
@@ -748,7 +748,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i16(
 define <vscale x 2 x i16> @intrinsic_vnsrl_mask_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv2i16_nxv2i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i16(
     <vscale x 2 x i16> %0,
@@ -788,7 +788,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i16(
 define <vscale x 4 x i16> @intrinsic_vnsrl_mask_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv4i16_nxv4i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i16(
     <vscale x 4 x i16> %0,
@@ -828,7 +828,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i16(
 define <vscale x 8 x i16> @intrinsic_vnsrl_mask_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv8i16_nxv8i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i16(
     <vscale x 8 x i16> %0,
@@ -868,7 +868,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i16(
 define <vscale x 16 x i16> @intrinsic_vnsrl_mask_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv16i16_nxv16i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i16(
     <vscale x 16 x i16> %0,
@@ -896,7 +896,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vnsrl_mask_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv1i8_nxv1i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i8(
     <vscale x 1 x i8> %0,
@@ -924,7 +924,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vnsrl_mask_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv2i8_nxv2i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i8(
     <vscale x 2 x i8> %0,
@@ -952,7 +952,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vnsrl_mask_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv4i8_nxv4i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i8(
     <vscale x 4 x i8> %0,
@@ -980,7 +980,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vnsrl_mask_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv8i8_nxv8i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i8(
     <vscale x 8 x i8> %0,
@@ -1008,7 +1008,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vnsrl_mask_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv16i8_nxv16i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i8(
     <vscale x 16 x i8> %0,
@@ -1036,7 +1036,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vnsrl_mask_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv32i8_nxv32i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i8(
     <vscale x 32 x i8> %0,
@@ -1064,7 +1064,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vnsrl_mask_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv1i16_nxv1i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i16(
     <vscale x 1 x i16> %0,
@@ -1092,7 +1092,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vnsrl_mask_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv2i16_nxv2i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i16(
     <vscale x 2 x i16> %0,
@@ -1120,7 +1120,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vnsrl_mask_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv4i16_nxv4i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vnsrl_mask_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv8i16_nxv8i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i16(
     <vscale x 8 x i16> %0,
@@ -1176,7 +1176,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vnsrl_mask_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv16i16_nxv16i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i16(
     <vscale x 16 x i16> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll
index cdb20025d773..354978856021 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vnsrl_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i8_nxv1i16_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vnsrl_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i8_nxv2i16_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vnsrl_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i8_nxv4i16_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vnsrl_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i8_nxv8i16_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vnsrl_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv16i8_nxv16i16_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vnsrl_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv32i8_nxv32i16_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vnsrl_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i16_nxv1i32_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vnsrl_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i16_nxv2i32_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vnsrl_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i16_nxv4i32_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vnsrl_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i16_nxv8i32_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vnsrl_mask_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv16i16_nxv16i32_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vnsrl_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i32_nxv1i64_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vnsrl_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i32_nxv2i64_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vnsrl_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i32_nxv4i64_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vnsrl_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i32_nxv8i64_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vnsrl.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i8(
 define <vscale x 1 x i8> @intrinsic_vnsrl_mask_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv1i8_nxv1i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i8(
     <vscale x 1 x i8> %0,
@@ -668,7 +668,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i8(
 define <vscale x 2 x i8> @intrinsic_vnsrl_mask_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv2i8_nxv2i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i8(
     <vscale x 2 x i8> %0,
@@ -708,7 +708,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i8(
 define <vscale x 4 x i8> @intrinsic_vnsrl_mask_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv4i8_nxv4i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i8(
     <vscale x 4 x i8> %0,
@@ -748,7 +748,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i8(
 define <vscale x 8 x i8> @intrinsic_vnsrl_mask_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv8i8_nxv8i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i8(
     <vscale x 8 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i8(
 define <vscale x 16 x i8> @intrinsic_vnsrl_mask_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv16i8_nxv16i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i8(
     <vscale x 16 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i8(
 define <vscale x 32 x i8> @intrinsic_vnsrl_mask_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv32i8_nxv32i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i8(
     <vscale x 32 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i16(
 define <vscale x 1 x i16> @intrinsic_vnsrl_mask_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv1i16_nxv1i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i16(
     <vscale x 1 x i16> %0,
@@ -908,7 +908,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i16(
 define <vscale x 2 x i16> @intrinsic_vnsrl_mask_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv2i16_nxv2i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i16(
     <vscale x 2 x i16> %0,
@@ -948,7 +948,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i16(
 define <vscale x 4 x i16> @intrinsic_vnsrl_mask_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv4i16_nxv4i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i16(
     <vscale x 4 x i16> %0,
@@ -988,7 +988,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i16(
 define <vscale x 8 x i16> @intrinsic_vnsrl_mask_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv8i16_nxv8i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i16(
     <vscale x 8 x i16> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i16(
 define <vscale x 16 x i16> @intrinsic_vnsrl_mask_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv16i16_nxv16i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i16(
     <vscale x 16 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i32(
 define <vscale x 1 x i32> @intrinsic_vnsrl_mask_wx_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv1i32_nxv1i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i32(
     <vscale x 1 x i32> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i32(
 define <vscale x 2 x i32> @intrinsic_vnsrl_mask_wx_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv2i32_nxv2i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i32(
     <vscale x 2 x i32> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i32(
 define <vscale x 4 x i32> @intrinsic_vnsrl_mask_wx_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv4i32_nxv4i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i32(
     <vscale x 4 x i32> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i32(
 define <vscale x 8 x i32> @intrinsic_vnsrl_mask_wx_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wx_nxv8i32_nxv8i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vnsrl.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i32(
     <vscale x 8 x i32> %0,
@@ -1216,7 +1216,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vnsrl_mask_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv1i8_nxv1i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i8(
     <vscale x 1 x i8> %0,
@@ -1244,7 +1244,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vnsrl_mask_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv2i8_nxv2i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i8(
     <vscale x 2 x i8> %0,
@@ -1272,7 +1272,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vnsrl_mask_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv4i8_nxv4i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i8(
     <vscale x 4 x i8> %0,
@@ -1300,7 +1300,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vnsrl_mask_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv8i8_nxv8i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i8(
     <vscale x 8 x i8> %0,
@@ -1328,7 +1328,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vnsrl_mask_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv16i8_nxv16i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i8(
     <vscale x 16 x i8> %0,
@@ -1356,7 +1356,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vnsrl_mask_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv32i8_nxv32i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i8(
     <vscale x 32 x i8> %0,
@@ -1384,7 +1384,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vnsrl_mask_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv1i16_nxv1i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i16(
     <vscale x 1 x i16> %0,
@@ -1412,7 +1412,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vnsrl_mask_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv2i16_nxv2i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i16(
     <vscale x 2 x i16> %0,
@@ -1440,7 +1440,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vnsrl_mask_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv4i16_nxv4i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i16(
     <vscale x 4 x i16> %0,
@@ -1468,7 +1468,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vnsrl_mask_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv8i16_nxv8i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i16(
     <vscale x 8 x i16> %0,
@@ -1496,7 +1496,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vnsrl_mask_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv16i16_nxv16i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i16(
     <vscale x 16 x i16> %0,
@@ -1524,7 +1524,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vnsrl_mask_wi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv1i32_nxv1i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i32(
     <vscale x 1 x i32> %0,
@@ -1552,7 +1552,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vnsrl_mask_wi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv2i32_nxv2i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i32(
     <vscale x 2 x i32> %0,
@@ -1580,7 +1580,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vnsrl_mask_wi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv4i32_nxv4i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i32(
     <vscale x 4 x i32> %0,
@@ -1608,7 +1608,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vnsrl_mask_wi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vnsrl_mask_wi_nxv8i32_nxv8i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vnsrl.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i32(
     <vscale x 8 x i32> %0,

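The vor tests below change in the same way as the vnsrl tests above:
each masked call passes the previous value of the result vector as its
first operand, so the vsetvli emitted for it now requests tail
undisturbed (tu) instead of tail agnostic (ta). A minimal IR sketch of
the shape these tests exercise (the function name is hypothetical and
not part of the patch; the intrinsic declaration is copied from the
RV32 test file that follows):

; Hypothetical illustration only. %0 is the merge operand tied to the
; destination register: with tu, lanes of %a at index vl and above must
; keep the values they had in %0; with ta the hardware is also allowed
; to fill those lanes with all ones.
declare <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  i32)

define <vscale x 1 x i8> @example_tail_policy(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,    ; merge value, tied to the destination
    <vscale x 1 x i8> %1,    ; op1
    <vscale x 1 x i8> %2,    ; op2
    <vscale x 1 x i1> %3,    ; mask, materialized in v0
    i32 %4)                  ; vl
  ret <vscale x 1 x i8> %a
}
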
diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
index 1ac44ba020ea..c626c3c4f608 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1456,7 +1456,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vor_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1484,7 +1484,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vor_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1512,7 +1512,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vor_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1540,7 +1540,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vor_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1568,7 +1568,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vor_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1596,7 +1596,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vor_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1624,7 +1624,7 @@ entry:
 define <vscale x 64 x i8> @intrinsic_vor_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1652,7 +1652,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vor_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1680,7 +1680,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vor_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1708,7 +1708,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vor_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1736,7 +1736,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vor_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1764,7 +1764,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vor_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1792,7 +1792,7 @@ entry:
 define <vscale x 32 x i16> @intrinsic_vor_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1820,7 +1820,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vor_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1848,7 +1848,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vor_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1876,7 +1876,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vor_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1904,7 +1904,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vor_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1932,7 +1932,7 @@ entry:
 define <vscale x 16 x i32> @intrinsic_vor_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll
index ce2107138f93..f5d70c9a3639 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1776,7 +1776,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vor_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1804,7 +1804,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vor_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1832,7 +1832,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vor_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1860,7 +1860,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vor_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1888,7 +1888,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vor_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1916,7 +1916,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vor_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1944,7 +1944,7 @@ entry:
 define <vscale x 64 x i8> @intrinsic_vor_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1972,7 +1972,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vor_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2000,7 +2000,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vor_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2028,7 +2028,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vor_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2056,7 +2056,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vor_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2084,7 +2084,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vor_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2112,7 +2112,7 @@ entry:
 define <vscale x 32 x i16> @intrinsic_vor_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2140,7 +2140,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vor_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2168,7 +2168,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vor_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2196,7 +2196,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vor_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2224,7 +2224,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vor_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2252,7 +2252,7 @@ entry:
 define <vscale x 16 x i32> @intrinsic_vor_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2280,7 +2280,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vor_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2308,7 +2308,7 @@ entry:
 define <vscale x 2 x i64> @intrinsic_vor_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2336,7 +2336,7 @@ entry:
 define <vscale x 4 x i64> @intrinsic_vor_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2364,7 +2364,7 @@ entry:
 define <vscale x 8 x i64> @intrinsic_vor_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
index 206bcb99a4c5..f20b20adf32e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll
index 85a7a13a1619..cd0b20be8b40 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

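For reference while reading the remaining hunks, here is a minimal sketch of
the IR shape every masked test in this patch exercises. The function name
@sketch is hypothetical; the declaration and call shape are copied from the
vremu tests below. The masked form passes the existing destination value as
its first operand, and the updated CHECK lines expect the selected vsetvli
to request tu (tail undisturbed) rather than ta:

; Hypothetical test function; mirrors intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8.
declare <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,  ; merge operand, carried through to the destination
  <vscale x 1 x i8>,  ; vector source
  <vscale x 1 x i8>,  ; vector source
  <vscale x 1 x i1>,  ; mask
  i32)                ; vl (i32 on rv32, i64 on rv64)

define <vscale x 1 x i8> @sketch(<vscale x 1 x i8> %merge, <vscale x 1 x i8> %x, <vscale x 1 x i8> %y, <vscale x 1 x i1> %m, i32 %vl) nounwind {
entry:
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
  %a = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %merge,
    <vscale x 1 x i8> %x,
    <vscale x 1 x i8> %y,
    <vscale x 1 x i1> %m,
    i32 %vl)
  ret <vscale x 1 x i8> %a
}

Note the one nuance visible in the vrgather hunks further down: an unmasked
helper load such as vle8.v still gets a ta,mu vsetvli; only the masked
operation writing the tied destination switches to tu,mu.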
diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll
index e5156293f5de..822ddbb6f380 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll
index b946328c60fa..1014fac681f7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll
index cbe399a77c56..fceb8f276647 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll
@@ -32,7 +32,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -77,7 +77,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vrgather.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -122,7 +122,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vrgather.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -167,7 +167,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vrgather.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -212,7 +212,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vrgather.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -259,7 +259,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_mask_vv_nxv32i8_nxv32i8_nxv32i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -309,7 +309,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8(<v
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a1)
 ; CHECK-NEXT:    vle8.v v24, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e8,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e8,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v24, v8, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -354,7 +354,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vrgather.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -399,7 +399,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vrgather.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -444,7 +444,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vrgather.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -489,7 +489,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vrgather.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -536,7 +536,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_mask_vv_nxv16i16_nxv16i16_nxv16i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -586,7 +586,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i1
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a1)
 ; CHECK-NEXT:    vle16.v v24, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v24, v8, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vrgather.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -676,7 +676,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vrgather.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -721,7 +721,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vrgather.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -768,7 +768,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_mask_vv_nxv8i32_nxv8i32_nxv8i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -818,7 +818,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i3
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a1)
 ; CHECK-NEXT:    vle32.v v24, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v24, v8, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -863,7 +863,7 @@ declare <vscale x 1 x half> @llvm.riscv.vrgather.mask.nxv1f16.nxv1i16(
 define <vscale x 1 x half> @intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -908,7 +908,7 @@ declare <vscale x 2 x half> @llvm.riscv.vrgather.mask.nxv2f16.nxv2i16(
 define <vscale x 2 x half> @intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 4 x half> @llvm.riscv.vrgather.mask.nxv4f16.nxv4i16(
 define <vscale x 4 x half> @intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -998,7 +998,7 @@ declare <vscale x 8 x half> @llvm.riscv.vrgather.mask.nxv8f16.nxv8i16(
 define <vscale x 8 x half> @intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1045,7 +1045,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_mask_vv_nxv16f16_nxv16f16_nxv16i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1095,7 +1095,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a1)
 ; CHECK-NEXT:    vle16.v v24, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v24, v8, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1140,7 +1140,7 @@ declare <vscale x 1 x float> @llvm.riscv.vrgather.mask.nxv1f32.nxv1i32(
 define <vscale x 1 x float> @intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1185,7 +1185,7 @@ declare <vscale x 2 x float> @llvm.riscv.vrgather.mask.nxv2f32.nxv2i32(
 define <vscale x 2 x float> @intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1230,7 +1230,7 @@ declare <vscale x 4 x float> @llvm.riscv.vrgather.mask.nxv4f32.nxv4i32(
 define <vscale x 4 x float> @intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1277,7 +1277,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_mask_vv_nxv8f32_nxv8f32_nxv8i32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1327,7 +1327,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a1)
 ; CHECK-NEXT:    vle32.v v24, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v24, v8, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1372,7 +1372,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1417,7 +1417,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vrgather.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1462,7 +1462,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vrgather.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1507,7 +1507,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vrgather.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1552,7 +1552,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vrgather.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v18, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1597,7 +1597,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vrgather.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v20, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1644,7 +1644,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e8,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e8,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a1, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1689,7 +1689,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vrgather.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1734,7 +1734,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vrgather.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1779,7 +1779,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vrgather.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1824,7 +1824,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vrgather.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v18, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1869,7 +1869,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vrgather.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v20, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1916,7 +1916,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16_i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a1, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1961,7 +1961,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vrgather.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2006,7 +2006,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vrgather.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2051,7 +2051,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vrgather.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v18, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2096,7 +2096,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vrgather.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v20, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2143,7 +2143,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32_i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a1, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2188,7 +2188,7 @@ declare <vscale x 1 x half> @llvm.riscv.vrgather.mask.nxv1f16.i16(
 define <vscale x 1 x half> @intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2233,7 +2233,7 @@ declare <vscale x 2 x half> @llvm.riscv.vrgather.mask.nxv2f16.i16(
 define <vscale x 2 x half> @intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2278,7 +2278,7 @@ declare <vscale x 4 x half> @llvm.riscv.vrgather.mask.nxv4f16.i16(
 define <vscale x 4 x half> @intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2323,7 +2323,7 @@ declare <vscale x 8 x half> @llvm.riscv.vrgather.mask.nxv8f16.i16(
 define <vscale x 8 x half> @intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v18, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2368,7 +2368,7 @@ declare <vscale x 16 x half> @llvm.riscv.vrgather.mask.nxv16f16.i16(
 define <vscale x 16 x half> @intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v20, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2415,7 +2415,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16_i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a1, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2460,7 +2460,7 @@ declare <vscale x 1 x float> @llvm.riscv.vrgather.mask.nxv1f32.i32(
 define <vscale x 1 x float> @intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2505,7 +2505,7 @@ declare <vscale x 2 x float> @llvm.riscv.vrgather.mask.nxv2f32.i32(
 define <vscale x 2 x float> @intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2550,7 +2550,7 @@ declare <vscale x 4 x float> @llvm.riscv.vrgather.mask.nxv4f32.i32(
 define <vscale x 4 x float> @intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v18, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2595,7 +2595,7 @@ declare <vscale x 8 x float> @llvm.riscv.vrgather.mask.nxv8f32.i32(
 define <vscale x 8 x float> @intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v20, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2642,7 +2642,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32_i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a1, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2675,7 +2675,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2708,7 +2708,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2741,7 +2741,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2774,7 +2774,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2807,7 +2807,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v18, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2840,7 +2840,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v20, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2875,7 +2875,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e8,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2908,7 +2908,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2941,7 +2941,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2974,7 +2974,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3007,7 +3007,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v18, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3040,7 +3040,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v20, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3075,7 +3075,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16_i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3108,7 +3108,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3141,7 +3141,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3174,7 +3174,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v18, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3207,7 +3207,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v20, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3242,7 +3242,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_mask_vi_nxv16i32_nxv16i32_i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3275,7 +3275,7 @@ entry:
 define <vscale x 1 x half> @intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3308,7 +3308,7 @@ entry:
 define <vscale x 2 x half> @intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3341,7 +3341,7 @@ entry:
 define <vscale x 4 x half> @intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3374,7 +3374,7 @@ entry:
 define <vscale x 8 x half> @intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v18, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3407,7 +3407,7 @@ entry:
 define <vscale x 16 x half> @intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v20, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3442,7 +3442,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16_i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3475,7 +3475,7 @@ entry:
 define <vscale x 1 x float> @intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3508,7 +3508,7 @@ entry:
 define <vscale x 2 x float> @intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3541,7 +3541,7 @@ entry:
 define <vscale x 4 x float> @intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v18, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3574,7 +3574,7 @@ entry:
 define <vscale x 8 x float> @intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v20, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3609,7 +3609,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_mask_vi_nxv16f32_nxv16f32_i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll
index c9c0a7221433..ffd2dc414315 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll
@@ -32,7 +32,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -77,7 +77,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vrgather.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -122,7 +122,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vrgather.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -167,7 +167,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vrgather.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -212,7 +212,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vrgather.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -259,7 +259,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_mask_vv_nxv32i8_nxv32i8_nxv32i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -309,7 +309,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8(<v
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a1)
 ; CHECK-NEXT:    vle8.v v24, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e8,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e8,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v24, v8, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -354,7 +354,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vrgather.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -399,7 +399,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vrgather.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -444,7 +444,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vrgather.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -489,7 +489,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vrgather.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -536,7 +536,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_mask_vv_nxv16i16_nxv16i16_nxv16i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -586,7 +586,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i1
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a1)
 ; CHECK-NEXT:    vle16.v v24, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v24, v8, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vrgather.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -676,7 +676,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vrgather.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -721,7 +721,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vrgather.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -768,7 +768,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_mask_vv_nxv8i32_nxv8i32_nxv8i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -818,7 +818,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i3
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a1)
 ; CHECK-NEXT:    vle32.v v24, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v24, v8, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -863,7 +863,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vrgather.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vrgather_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -908,7 +908,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vrgather.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vrgather_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -955,7 +955,7 @@ define <vscale x 4 x i64> @intrinsic_vrgather_mask_vv_nxv4i64_nxv4i64_nxv4i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1005,7 +1005,7 @@ define <vscale x 8 x i64> @intrinsic_vrgather_mask_vv_nxv8i64_nxv8i64_nxv8i64(<v
 ; CHECK-NEXT:    vsetvli a3, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a1)
 ; CHECK-NEXT:    vle64.v v24, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v24, v8, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1050,7 +1050,7 @@ declare <vscale x 1 x half> @llvm.riscv.vrgather.mask.nxv1f16.nxv1i16(
 define <vscale x 1 x half> @intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1095,7 +1095,7 @@ declare <vscale x 2 x half> @llvm.riscv.vrgather.mask.nxv2f16.nxv2i16(
 define <vscale x 2 x half> @intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1140,7 +1140,7 @@ declare <vscale x 4 x half> @llvm.riscv.vrgather.mask.nxv4f16.nxv4i16(
 define <vscale x 4 x half> @intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1185,7 +1185,7 @@ declare <vscale x 8 x half> @llvm.riscv.vrgather.mask.nxv8f16.nxv8i16(
 define <vscale x 8 x half> @intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1232,7 +1232,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_mask_vv_nxv16f16_nxv16f16_nxv16i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1282,7 +1282,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a1)
 ; CHECK-NEXT:    vle16.v v24, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v24, v8, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1327,7 +1327,7 @@ declare <vscale x 1 x float> @llvm.riscv.vrgather.mask.nxv1f32.nxv1i32(
 define <vscale x 1 x float> @intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1372,7 +1372,7 @@ declare <vscale x 2 x float> @llvm.riscv.vrgather.mask.nxv2f32.nxv2i32(
 define <vscale x 2 x float> @intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1417,7 +1417,7 @@ declare <vscale x 4 x float> @llvm.riscv.vrgather.mask.nxv4f32.nxv4i32(
 define <vscale x 4 x float> @intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1464,7 +1464,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_mask_vv_nxv8f32_nxv8f32_nxv8i32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1514,7 +1514,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a1)
 ; CHECK-NEXT:    vle32.v v24, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v24, v8, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1559,7 +1559,7 @@ declare <vscale x 1 x double> @llvm.riscv.vrgather.mask.nxv1f64.nxv1i64(
 define <vscale x 1 x double> @intrinsic_vrgather_mask_vv_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f64_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1604,7 +1604,7 @@ declare <vscale x 2 x double> @llvm.riscv.vrgather.mask.nxv2f64.nxv2i64(
 define <vscale x 2 x double> @intrinsic_vrgather_mask_vv_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f64_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v18, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1651,7 +1651,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_mask_vv_nxv4f64_nxv4f64_nxv4i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v20, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1701,7 +1701,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64
 ; CHECK-NEXT:    vsetvli a3, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a1)
 ; CHECK-NEXT:    vle64.v v24, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v16, v24, v8, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1746,7 +1746,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1791,7 +1791,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vrgather.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1836,7 +1836,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vrgather.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1881,7 +1881,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vrgather.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1926,7 +1926,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vrgather.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v18, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1971,7 +1971,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vrgather.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v20, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2018,7 +2018,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e8,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e8,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a1, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2063,7 +2063,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vrgather.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2108,7 +2108,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vrgather.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2153,7 +2153,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vrgather.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2198,7 +2198,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vrgather.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v18, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2243,7 +2243,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vrgather.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v20, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2290,7 +2290,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16_i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a1, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2335,7 +2335,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vrgather.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2380,7 +2380,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vrgather.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2425,7 +2425,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vrgather.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v18, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2470,7 +2470,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vrgather.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v20, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2517,7 +2517,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32_i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a1, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2562,7 +2562,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vrgather.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vrgather_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2607,7 +2607,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vrgather.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vrgather_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i64_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v18, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2652,7 +2652,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vrgather.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vrgather_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i64_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v20, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2699,7 +2699,7 @@ define <vscale x 8 x i64> @intrinsic_vrgather_mask_vx_nxv8i64_nxv8i64_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a1, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2744,7 +2744,7 @@ declare <vscale x 1 x half> @llvm.riscv.vrgather.mask.nxv1f16.i16(
 define <vscale x 1 x half> @intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2789,7 +2789,7 @@ declare <vscale x 2 x half> @llvm.riscv.vrgather.mask.nxv2f16.i16(
 define <vscale x 2 x half> @intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2834,7 +2834,7 @@ declare <vscale x 4 x half> @llvm.riscv.vrgather.mask.nxv4f16.i16(
 define <vscale x 4 x half> @intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2879,7 +2879,7 @@ declare <vscale x 8 x half> @llvm.riscv.vrgather.mask.nxv8f16.i16(
 define <vscale x 8 x half> @intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v18, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2924,7 +2924,7 @@ declare <vscale x 16 x half> @llvm.riscv.vrgather.mask.nxv16f16.i16(
 define <vscale x 16 x half> @intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v20, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2971,7 +2971,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16_i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a1, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3016,7 +3016,7 @@ declare <vscale x 1 x float> @llvm.riscv.vrgather.mask.nxv1f32.i32(
 define <vscale x 1 x float> @intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3061,7 +3061,7 @@ declare <vscale x 2 x float> @llvm.riscv.vrgather.mask.nxv2f32.i32(
 define <vscale x 2 x float> @intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3106,7 +3106,7 @@ declare <vscale x 4 x float> @llvm.riscv.vrgather.mask.nxv4f32.i32(
 define <vscale x 4 x float> @intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v18, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3151,7 +3151,7 @@ declare <vscale x 8 x float> @llvm.riscv.vrgather.mask.nxv8f32.i32(
 define <vscale x 8 x float> @intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v20, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3198,7 +3198,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32_i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a1, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3243,7 +3243,7 @@ declare <vscale x 1 x double> @llvm.riscv.vrgather.mask.nxv1f64.i64(
 define <vscale x 1 x double> @intrinsic_vrgather_mask_vx_nxv1f64_nxv1f64_i64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f64_nxv1f64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3288,7 +3288,7 @@ declare <vscale x 2 x double> @llvm.riscv.vrgather.mask.nxv2f64.i64(
 define <vscale x 2 x double> @intrinsic_vrgather_mask_vx_nxv2f64_nxv2f64_i64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f64_nxv2f64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v18, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3333,7 +3333,7 @@ declare <vscale x 4 x double> @llvm.riscv.vrgather.mask.nxv4f64.i64(
 define <vscale x 4 x double> @intrinsic_vrgather_mask_vx_nxv4f64_nxv4f64_i64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f64_nxv4f64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v20, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3380,7 +3380,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_mask_vx_nxv8f64_nxv8f64_i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a1, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3413,7 +3413,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3446,7 +3446,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3479,7 +3479,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3512,7 +3512,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3545,7 +3545,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v18, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3578,7 +3578,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v20, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3613,7 +3613,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e8,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3646,7 +3646,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3679,7 +3679,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3712,7 +3712,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3745,7 +3745,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v18, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3778,7 +3778,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v20, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3813,7 +3813,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16_i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3846,7 +3846,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3879,7 +3879,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3912,7 +3912,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v18, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3945,7 +3945,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v20, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -3980,7 +3980,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_mask_vi_nxv16i32_nxv16i32_i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -4013,7 +4013,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vrgather_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -4046,7 +4046,7 @@ entry:
 define <vscale x 2 x i64> @intrinsic_vrgather_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i64_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v18, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -4079,7 +4079,7 @@ entry:
 define <vscale x 4 x i64> @intrinsic_vrgather_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i64_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v20, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -4114,7 +4114,7 @@ define <vscale x 8 x i64> @intrinsic_vrgather_mask_vi_nxv8i64_nxv8i64_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -4147,7 +4147,7 @@ entry:
 define <vscale x 1 x half> @intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -4180,7 +4180,7 @@ entry:
 define <vscale x 2 x half> @intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -4213,7 +4213,7 @@ entry:
 define <vscale x 4 x half> @intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -4246,7 +4246,7 @@ entry:
 define <vscale x 8 x half> @intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v18, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -4279,7 +4279,7 @@ entry:
 define <vscale x 16 x half> @intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v20, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -4314,7 +4314,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16_i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -4347,7 +4347,7 @@ entry:
 define <vscale x 1 x float> @intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -4380,7 +4380,7 @@ entry:
 define <vscale x 2 x float> @intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -4413,7 +4413,7 @@ entry:
 define <vscale x 4 x float> @intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v18, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -4446,7 +4446,7 @@ entry:
 define <vscale x 8 x float> @intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v20, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -4481,7 +4481,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_mask_vi_nxv16f32_nxv16f32_i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -4514,7 +4514,7 @@ entry:
 define <vscale x 1 x double> @intrinsic_vrgather_mask_vi_nxv1f64_nxv1f64_i64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f64_nxv1f64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v17, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -4547,7 +4547,7 @@ entry:
 define <vscale x 2 x double> @intrinsic_vrgather_mask_vi_nxv2f64_nxv2f64_i64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f64_nxv2f64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v18, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -4580,7 +4580,7 @@ entry:
 define <vscale x 4 x double> @intrinsic_vrgather_mask_vi_nxv4f64_nxv4f64_i64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f64_nxv4f64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v20, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -4615,7 +4615,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_mask_vi_nxv8f64_nxv8f64_i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
index 108e4d5fae3f..8393d24b0776 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vrsub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vrsub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vrsub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vrsub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vrsub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vrsub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vrsub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vrsub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vrsub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vrsub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vrsub_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vrsub_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vrsub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vrsub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vrsub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vrsub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vrsub_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -736,7 +736,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -764,7 +764,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -792,7 +792,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -820,7 +820,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -848,7 +848,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -876,7 +876,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -904,7 +904,7 @@ entry:
 define <vscale x 64 x i8> @intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -932,7 +932,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -960,7 +960,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -988,7 +988,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1016,7 +1016,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1044,7 +1044,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1072,7 +1072,7 @@ entry:
 define <vscale x 32 x i16> @intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1100,7 +1100,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1128,7 +1128,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1156,7 +1156,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1184,7 +1184,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1212,7 +1212,7 @@ entry:
 define <vscale x 16 x i32> @intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll
index 8d3dbdffaaae..5fbc1ee63d16 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vrsub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vrsub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vrsub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vrsub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vrsub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vrsub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vrsub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vrsub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vrsub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vrsub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vrsub_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vrsub_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vrsub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vrsub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vrsub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vrsub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vrsub_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vrsub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vrsub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vrsub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vrsub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -896,7 +896,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -924,7 +924,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -952,7 +952,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -980,7 +980,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1008,7 +1008,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1036,7 +1036,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1064,7 +1064,7 @@ entry:
 define <vscale x 64 x i8> @intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1092,7 +1092,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1120,7 +1120,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1148,7 +1148,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1176,7 +1176,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1204,7 +1204,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1232,7 +1232,7 @@ entry:
 define <vscale x 32 x i16> @intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1260,7 +1260,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1288,7 +1288,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1316,7 +1316,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1344,7 +1344,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1372,7 +1372,7 @@ entry:
 define <vscale x 16 x i32> @intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1400,7 +1400,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vrsub_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1428,7 +1428,7 @@ entry:
 define <vscale x 2 x i64> @intrinsic_vrsub_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1456,7 +1456,7 @@ entry:
 define <vscale x 4 x i64> @intrinsic_vrsub_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1484,7 +1484,7 @@ entry:
 define <vscale x 8 x i64> @intrinsic_vrsub_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll
index 6b2d018cff0e..21e6e7a0148b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1456,7 +1456,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vsadd_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1484,7 +1484,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vsadd_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1512,7 +1512,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vsadd_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1540,7 +1540,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vsadd_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1568,7 +1568,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vsadd_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1596,7 +1596,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vsadd_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1624,7 +1624,7 @@ entry:
 define <vscale x 64 x i8> @intrinsic_vsadd_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1652,7 +1652,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vsadd_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1680,7 +1680,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vsadd_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1708,7 +1708,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vsadd_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1736,7 +1736,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vsadd_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1764,7 +1764,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vsadd_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1792,7 +1792,7 @@ entry:
 define <vscale x 32 x i16> @intrinsic_vsadd_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1820,7 +1820,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vsadd_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1848,7 +1848,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vsadd_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1876,7 +1876,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vsadd_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1904,7 +1904,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vsadd_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1932,7 +1932,7 @@ entry:
 define <vscale x 16 x i32> @intrinsic_vsadd_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

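Note: every masked test updated in these files has the same shape — the first
vector operand is the tied destination, so the vsetvli is now expected to
request tail undisturbed. A minimal sketch of that pattern (the function name
and the concrete registers in the comments are illustrative, not taken from
the patch):

define <vscale x 1 x i8> @example_vsadd_mask_tu(<vscale x 1 x i8> %dst, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %op2, <vscale x 1 x i1> %mask, i64 %vl) nounwind {
entry:
; Because %dst is tied to the result, codegen now selects "tu" so the tail
; elements of %dst are left undisturbed, e.g.:
;   vsetvli a1, a0, e8,mf8,tu,mu
;   vsadd.vv v8, v9, v10, v0.t
  %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %dst,
    <vscale x 1 x i8> %op1,
    <vscale x 1 x i8> %op2,
    <vscale x 1 x i1> %mask,
    i64 %vl)
  ret <vscale x 1 x i8> %a
}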
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll
index 864f0b48b371..f116727dcef2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vsadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vsadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vsadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1776,7 +1776,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vsadd_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1804,7 +1804,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vsadd_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1832,7 +1832,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vsadd_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1860,7 +1860,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vsadd_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1888,7 +1888,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vsadd_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1916,7 +1916,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vsadd_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1944,7 +1944,7 @@ entry:
 define <vscale x 64 x i8> @intrinsic_vsadd_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1972,7 +1972,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vsadd_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2000,7 +2000,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vsadd_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2028,7 +2028,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vsadd_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2056,7 +2056,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vsadd_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2084,7 +2084,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vsadd_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2112,7 +2112,7 @@ entry:
 define <vscale x 32 x i16> @intrinsic_vsadd_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2140,7 +2140,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vsadd_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2168,7 +2168,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vsadd_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2196,7 +2196,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vsadd_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2224,7 +2224,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vsadd_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2252,7 +2252,7 @@ entry:
 define <vscale x 16 x i32> @intrinsic_vsadd_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2280,7 +2280,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vsadd_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2308,7 +2308,7 @@ entry:
 define <vscale x 2 x i64> @intrinsic_vsadd_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2336,7 +2336,7 @@ entry:
 define <vscale x 4 x i64> @intrinsic_vsadd_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2364,7 +2364,7 @@ entry:
 define <vscale x 8 x i64> @intrinsic_vsadd_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll
index 256b2ebc1a81..22827b558a10 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1456,7 +1456,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vsaddu_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1484,7 +1484,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vsaddu_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1512,7 +1512,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vsaddu_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1540,7 +1540,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vsaddu_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1568,7 +1568,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vsaddu_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1596,7 +1596,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vsaddu_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1624,7 +1624,7 @@ entry:
 define <vscale x 64 x i8> @intrinsic_vsaddu_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1652,7 +1652,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vsaddu_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1680,7 +1680,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vsaddu_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1708,7 +1708,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vsaddu_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1736,7 +1736,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vsaddu_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1764,7 +1764,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vsaddu_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1792,7 +1792,7 @@ entry:
 define <vscale x 32 x i16> @intrinsic_vsaddu_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1820,7 +1820,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vsaddu_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1848,7 +1848,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vsaddu_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1876,7 +1876,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vsaddu_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1904,7 +1904,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vsaddu_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1932,7 +1932,7 @@ entry:
 define <vscale x 16 x i32> @intrinsic_vsaddu_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll
index f0e582df4663..1237ceda9573 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vsaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vsaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vsaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vsaddu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vsaddu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vsaddu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vsaddu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1776,7 +1776,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vsaddu_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1804,7 +1804,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vsaddu_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1832,7 +1832,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vsaddu_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1860,7 +1860,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vsaddu_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1888,7 +1888,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vsaddu_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1916,7 +1916,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vsaddu_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1944,7 +1944,7 @@ entry:
 define <vscale x 64 x i8> @intrinsic_vsaddu_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1972,7 +1972,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vsaddu_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2000,7 +2000,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vsaddu_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2028,7 +2028,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vsaddu_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2056,7 +2056,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vsaddu_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2084,7 +2084,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vsaddu_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2112,7 +2112,7 @@ entry:
 define <vscale x 32 x i16> @intrinsic_vsaddu_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2140,7 +2140,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vsaddu_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2168,7 +2168,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vsaddu_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2196,7 +2196,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vsaddu_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2224,7 +2224,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vsaddu_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2252,7 +2252,7 @@ entry:
 define <vscale x 16 x i32> @intrinsic_vsaddu_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2280,7 +2280,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vsaddu_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2308,7 +2308,7 @@ entry:
 define <vscale x 2 x i64> @intrinsic_vsaddu_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2336,7 +2336,7 @@ entry:
 define <vscale x 4 x i64> @intrinsic_vsaddu_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2364,7 +2364,7 @@ entry:
 define <vscale x 8 x i64> @intrinsic_vsaddu_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll
index 1ff3600d7c9a..6c8e51b05aac 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll
@@ -31,7 +31,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vslide1down.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vslide1down_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -75,7 +75,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vslide1down.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vslide1down_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -119,7 +119,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vslide1down.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vslide1down_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -163,7 +163,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vslide1down.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -207,7 +207,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vslide1down.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v18, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -251,7 +251,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vslide1down.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v20, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -297,7 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vslide1down_mask_vx_nxv64i8_nxv64i8_i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e8,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e8,m8,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v8, a1, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -341,7 +341,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vslide1down.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vslide1down_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -385,7 +385,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vslide1down.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vslide1down_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -429,7 +429,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vslide1down.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -473,7 +473,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vslide1down.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v18, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -517,7 +517,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vslide1down.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v20, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vslide1down_mask_vx_nxv32i16_nxv32i16_i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v8, a1, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -607,7 +607,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vslide1down_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -651,7 +651,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -695,7 +695,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vslide1down.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v18, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -739,7 +739,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vslide1down.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v20, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -785,7 +785,7 @@ define <vscale x 16 x i32> @intrinsic_vslide1down_mask_vx_nxv16i32_nxv16i32_i32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v8, a1, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll
index c86475c75056..51a677ba18c2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll
@@ -31,7 +31,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vslide1down.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vslide1down_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -75,7 +75,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vslide1down.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vslide1down_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -119,7 +119,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vslide1down.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vslide1down_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -163,7 +163,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vslide1down.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -207,7 +207,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vslide1down.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v18, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -251,7 +251,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vslide1down.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v20, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -297,7 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vslide1down_mask_vx_nxv64i8_nxv64i8_i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e8,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e8,m8,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v8, a1, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -341,7 +341,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vslide1down.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vslide1down_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -385,7 +385,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vslide1down.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vslide1down_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -429,7 +429,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vslide1down.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -473,7 +473,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vslide1down.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v18, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -517,7 +517,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vslide1down.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v20, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vslide1down_mask_vx_nxv32i16_nxv32i16_i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v8, a1, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -607,7 +607,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vslide1down_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -651,7 +651,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -695,7 +695,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vslide1down.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v18, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -739,7 +739,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vslide1down.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v20, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -785,7 +785,7 @@ define <vscale x 16 x i32> @intrinsic_vslide1down_mask_vx_nxv16i32_nxv16i32_i32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v8, a1, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -829,7 +829,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -873,7 +873,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vslide1down.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v18, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -917,7 +917,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vslide1down.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v20, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -963,7 +963,7 @@ define <vscale x 8 x i64> @intrinsic_vslide1down_mask_vx_nxv8i64_nxv8i64_i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v16, v8, a1, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll
index f514b6052891..9c13a6e09cee 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll
@@ -32,7 +32,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -77,7 +77,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vslide1up.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vslide1up_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -122,7 +122,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vslide1up.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vslide1up_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -167,7 +167,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vslide1up.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vslide1up_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -212,7 +212,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vslide1up.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vslide1up_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v18, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -257,7 +257,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vslide1up.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vslide1up_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v20, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -304,7 +304,7 @@ define <vscale x 64 x i8> @intrinsic_vslide1up_mask_vx_nxv64i8_nxv64i8_i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e8,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e8,m8,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v8, a1, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -349,7 +349,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vslide1up.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vslide1up_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -394,7 +394,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vslide1up.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vslide1up_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -439,7 +439,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vslide1up.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vslide1up_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -484,7 +484,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vslide1up.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vslide1up_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v18, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -529,7 +529,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vslide1up.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vslide1up_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v20, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -576,7 +576,7 @@ define <vscale x 32 x i16> @intrinsic_vslide1up_mask_vx_nxv32i16_nxv32i16_i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v8, a1, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -621,7 +621,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vslide1up_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -666,7 +666,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vslide1up.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vslide1up_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -711,7 +711,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vslide1up.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vslide1up_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v18, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -756,7 +756,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vslide1up.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vslide1up_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v20, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -803,7 +803,7 @@ define <vscale x 16 x i32> @intrinsic_vslide1up_mask_vx_nxv16i32_nxv16i32_i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v8, a1, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -848,7 +848,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vslide1up.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v17, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -893,7 +893,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vslide1up.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v18, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -938,7 +938,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vslide1up.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v20, a0, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -985,7 +985,7 @@ define <vscale x 8 x i64> @intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v8, a1, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
index dd7acdf89294..3e2fb4ca7653 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
@@ -9,7 +9,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vslidedown_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -30,7 +30,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -45,7 +45,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vslidedown_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -59,7 +59,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vslidedown_mask_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -80,7 +80,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vslidedown_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -101,7 +101,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -116,7 +116,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vslidedown_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -130,7 +130,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vslidedown_mask_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -151,7 +151,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vslidedown_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -172,7 +172,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -187,7 +187,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vslidedown_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -201,7 +201,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vslidedown_mask_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -222,7 +222,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vslidedown_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -243,7 +243,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -258,7 +258,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vslidedown_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -272,7 +272,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vslidedown_mask_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -293,7 +293,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vslidedown_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -314,7 +314,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -329,7 +329,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vslidedown_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -343,7 +343,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vslidedown_mask_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -364,7 +364,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vslidedown_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -385,7 +385,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -400,7 +400,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vslidedown_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -414,7 +414,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vslidedown_mask_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -435,7 +435,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vslidedown_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -456,7 +456,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -471,7 +471,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vslidedown_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -485,7 +485,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vslidedown_mask_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -506,7 +506,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vslidedown_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -527,7 +527,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -542,7 +542,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vslidedown_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -556,7 +556,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vslidedown_mask_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -577,7 +577,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vslidedown_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -598,7 +598,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -613,7 +613,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vslidedown_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -627,7 +627,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vslidedown_mask_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -648,7 +648,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vslidedown_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -669,7 +669,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -684,7 +684,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vslidedown_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -698,7 +698,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vslidedown_mask_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -719,7 +719,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vslidedown_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -740,7 +740,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -755,7 +755,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vslidedown_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -769,7 +769,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vslidedown_mask_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -790,7 +790,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vslidedown_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -811,7 +811,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -826,7 +826,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vslidedown_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -840,7 +840,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vslidedown_mask_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -861,7 +861,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vslidedown_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -882,7 +882,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -897,7 +897,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vslidedown_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -911,7 +911,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vslidedown_mask_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -932,7 +932,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vslidedown_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -953,7 +953,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -968,7 +968,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vslidedown_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -982,7 +982,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vslidedown_mask_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1003,7 +1003,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vslidedown_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1024,7 +1024,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1039,7 +1039,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vslidedown_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1053,7 +1053,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vslidedown_mask_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1074,7 +1074,7 @@ declare <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vslidedown_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1095,7 +1095,7 @@ declare <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1110,7 +1110,7 @@ entry:
 define <vscale x 1 x half> @intrinsic_vslidedown_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1124,7 +1124,7 @@ entry:
 define <vscale x 1 x half> @intrinsic_vslidedown_mask_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1145,7 +1145,7 @@ declare <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vslidedown_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1166,7 +1166,7 @@ declare <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1181,7 +1181,7 @@ entry:
 define <vscale x 2 x half> @intrinsic_vslidedown_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1195,7 +1195,7 @@ entry:
 define <vscale x 2 x half> @intrinsic_vslidedown_mask_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1216,7 +1216,7 @@ declare <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vslidedown_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1237,7 +1237,7 @@ declare <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1252,7 +1252,7 @@ entry:
 define <vscale x 4 x half> @intrinsic_vslidedown_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1266,7 +1266,7 @@ entry:
 define <vscale x 4 x half> @intrinsic_vslidedown_mask_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1287,7 +1287,7 @@ declare <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vslidedown_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1323,7 +1323,7 @@ entry:
 define <vscale x 8 x half> @intrinsic_vslidedown_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1337,7 +1337,7 @@ entry:
 define <vscale x 8 x half> @intrinsic_vslidedown_mask_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1358,7 +1358,7 @@ declare <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vslidedown_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1379,7 +1379,7 @@ declare <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1394,7 +1394,7 @@ entry:
 define <vscale x 16 x half> @intrinsic_vslidedown_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1408,7 +1408,7 @@ entry:
 define <vscale x 16 x half> @intrinsic_vslidedown_mask_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1429,7 +1429,7 @@ declare <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vslidedown_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1450,7 +1450,7 @@ declare <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1465,7 +1465,7 @@ entry:
 define <vscale x 1 x float> @intrinsic_vslidedown_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1479,7 +1479,7 @@ entry:
 define <vscale x 1 x float> @intrinsic_vslidedown_mask_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1500,7 +1500,7 @@ declare <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vslidedown_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1521,7 +1521,7 @@ declare <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1536,7 +1536,7 @@ entry:
 define <vscale x 2 x float> @intrinsic_vslidedown_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1550,7 +1550,7 @@ entry:
 define <vscale x 2 x float> @intrinsic_vslidedown_mask_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1571,7 +1571,7 @@ declare <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vslidedown_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
     <vscale x 4 x float> %0,
@@ -1592,7 +1592,7 @@ declare <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -1607,7 +1607,7 @@ entry:
 define <vscale x 4 x float> @intrinsic_vslidedown_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
     <vscale x 4 x float> %0,
@@ -1621,7 +1621,7 @@ entry:
 define <vscale x 4 x float> @intrinsic_vslidedown_mask_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -1642,7 +1642,7 @@ declare <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vslidedown_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
     <vscale x 8 x float> %0,
@@ -1663,7 +1663,7 @@ declare <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -1678,7 +1678,7 @@ entry:
 define <vscale x 8 x float> @intrinsic_vslidedown_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
     <vscale x 8 x float> %0,
@@ -1692,7 +1692,7 @@ entry:
 define <vscale x 8 x float> @intrinsic_vslidedown_mask_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
     <vscale x 8 x float> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll
index 154c67dc52f1..da5b3e432b2c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll
@@ -9,7 +9,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vslidedown_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -30,7 +30,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -45,7 +45,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vslidedown_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -59,7 +59,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vslidedown_mask_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -80,7 +80,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vslidedown_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -101,7 +101,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -116,7 +116,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vslidedown_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -130,7 +130,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vslidedown_mask_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -151,7 +151,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vslidedown_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -172,7 +172,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -187,7 +187,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vslidedown_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -201,7 +201,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vslidedown_mask_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -222,7 +222,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vslidedown_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -243,7 +243,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -258,7 +258,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vslidedown_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -272,7 +272,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vslidedown_mask_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -293,7 +293,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vslidedown_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -314,7 +314,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -329,7 +329,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vslidedown_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -343,7 +343,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vslidedown_mask_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -364,7 +364,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vslidedown_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -385,7 +385,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -400,7 +400,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vslidedown_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -414,7 +414,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vslidedown_mask_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -435,7 +435,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vslidedown_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -456,7 +456,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -471,7 +471,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vslidedown_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -485,7 +485,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vslidedown_mask_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -506,7 +506,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vslidedown_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -527,7 +527,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -542,7 +542,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vslidedown_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -556,7 +556,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vslidedown_mask_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -577,7 +577,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vslidedown_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -598,7 +598,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -613,7 +613,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vslidedown_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -627,7 +627,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vslidedown_mask_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -648,7 +648,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vslidedown_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -669,7 +669,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -684,7 +684,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vslidedown_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -698,7 +698,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vslidedown_mask_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -719,7 +719,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vslidedown_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -740,7 +740,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -755,7 +755,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vslidedown_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -769,7 +769,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vslidedown_mask_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -790,7 +790,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vslidedown_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -811,7 +811,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -826,7 +826,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vslidedown_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -840,7 +840,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vslidedown_mask_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -861,7 +861,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vslidedown_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -882,7 +882,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -897,7 +897,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vslidedown_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -911,7 +911,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vslidedown_mask_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -932,7 +932,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vslidedown_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -953,7 +953,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -968,7 +968,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vslidedown_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -982,7 +982,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vslidedown_mask_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1003,7 +1003,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vslidedown_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1024,7 +1024,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1039,7 +1039,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vslidedown_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1053,7 +1053,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vslidedown_mask_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1074,7 +1074,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vslidedown_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1095,7 +1095,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1110,7 +1110,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vslidedown_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1124,7 +1124,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vslidedown_mask_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1145,7 +1145,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vslidedown_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1166,7 +1166,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1181,7 +1181,7 @@ entry:
 define <vscale x 2 x i64> @intrinsic_vslidedown_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1195,7 +1195,7 @@ entry:
 define <vscale x 2 x i64> @intrinsic_vslidedown_mask_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1216,7 +1216,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vslidedown_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1237,7 +1237,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1252,7 +1252,7 @@ entry:
 define <vscale x 4 x i64> @intrinsic_vslidedown_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1266,7 +1266,7 @@ entry:
 define <vscale x 4 x i64> @intrinsic_vslidedown_mask_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1287,7 +1287,7 @@ declare <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vslidedown_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1323,7 +1323,7 @@ entry:
 define <vscale x 1 x half> @intrinsic_vslidedown_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1337,7 +1337,7 @@ entry:
 define <vscale x 1 x half> @intrinsic_vslidedown_mask_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1358,7 +1358,7 @@ declare <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vslidedown_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1379,7 +1379,7 @@ declare <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1394,7 +1394,7 @@ entry:
 define <vscale x 2 x half> @intrinsic_vslidedown_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1408,7 +1408,7 @@ entry:
 define <vscale x 2 x half> @intrinsic_vslidedown_mask_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1429,7 +1429,7 @@ declare <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vslidedown_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1450,7 +1450,7 @@ declare <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1465,7 +1465,7 @@ entry:
 define <vscale x 4 x half> @intrinsic_vslidedown_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1479,7 +1479,7 @@ entry:
 define <vscale x 4 x half> @intrinsic_vslidedown_mask_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1500,7 +1500,7 @@ declare <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vslidedown_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1521,7 +1521,7 @@ declare <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1536,7 +1536,7 @@ entry:
 define <vscale x 8 x half> @intrinsic_vslidedown_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1550,7 +1550,7 @@ entry:
 define <vscale x 8 x half> @intrinsic_vslidedown_mask_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1571,7 +1571,7 @@ declare <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vslidedown_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1592,7 +1592,7 @@ declare <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1607,7 +1607,7 @@ entry:
 define <vscale x 16 x half> @intrinsic_vslidedown_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1621,7 +1621,7 @@ entry:
 define <vscale x 16 x half> @intrinsic_vslidedown_mask_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1642,7 +1642,7 @@ declare <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vslidedown_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1663,7 +1663,7 @@ declare <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1678,7 +1678,7 @@ entry:
 define <vscale x 1 x float> @intrinsic_vslidedown_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1692,7 +1692,7 @@ entry:
 define <vscale x 1 x float> @intrinsic_vslidedown_mask_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1713,7 +1713,7 @@ declare <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vslidedown_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1734,7 +1734,7 @@ declare <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1749,7 +1749,7 @@ entry:
 define <vscale x 2 x float> @intrinsic_vslidedown_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1763,7 +1763,7 @@ entry:
 define <vscale x 2 x float> @intrinsic_vslidedown_mask_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1784,7 +1784,7 @@ declare <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vslidedown_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
     <vscale x 4 x float> %0,
@@ -1805,7 +1805,7 @@ declare <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -1820,7 +1820,7 @@ entry:
 define <vscale x 4 x float> @intrinsic_vslidedown_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
     <vscale x 4 x float> %0,
@@ -1834,7 +1834,7 @@ entry:
 define <vscale x 4 x float> @intrinsic_vslidedown_mask_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -1855,7 +1855,7 @@ declare <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vslidedown_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
     <vscale x 8 x float> %0,
@@ -1876,7 +1876,7 @@ declare <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -1891,7 +1891,7 @@ entry:
 define <vscale x 8 x float> @intrinsic_vslidedown_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
     <vscale x 8 x float> %0,
@@ -1905,7 +1905,7 @@ entry:
 define <vscale x 8 x float> @intrinsic_vslidedown_mask_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -1926,7 +1926,7 @@ declare <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vslidedown_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f64_nxv1f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
     <vscale x 1 x double> %0,
@@ -1947,7 +1947,7 @@ declare <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -1962,7 +1962,7 @@ entry:
 define <vscale x 1 x double> @intrinsic_vslidedown_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f64_nxv1f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
     <vscale x 1 x double> %0,
@@ -1976,7 +1976,7 @@ entry:
 define <vscale x 1 x double> @intrinsic_vslidedown_mask_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f64_nxv1f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -1997,7 +1997,7 @@ declare <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
 define <vscale x 2 x double> @intrinsic_vslidedown_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f64_nxv2f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2018,7 +2018,7 @@ declare <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
 define <vscale x 2 x double> @intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2033,7 +2033,7 @@ entry:
 define <vscale x 2 x double> @intrinsic_vslidedown_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f64_nxv2f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2047,7 +2047,7 @@ entry:
 define <vscale x 2 x double> @intrinsic_vslidedown_mask_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f64_nxv2f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2068,7 +2068,7 @@ declare <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
 define <vscale x 4 x double> @intrinsic_vslidedown_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f64_nxv4f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
     <vscale x 4 x double> %0,
@@ -2089,7 +2089,7 @@ declare <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
 define <vscale x 4 x double> @intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -2104,7 +2104,7 @@ entry:
 define <vscale x 4 x double> @intrinsic_vslidedown_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f64_nxv4f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
     <vscale x 4 x double> %0,
@@ -2118,7 +2118,7 @@ entry:
 define <vscale x 4 x double> @intrinsic_vslidedown_mask_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f64_nxv4f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
     <vscale x 4 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll
index 6a589eaafeaa..3a5c47b9112e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll
@@ -9,7 +9,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vslideup_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -30,7 +30,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -45,7 +45,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vslideup_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -59,7 +59,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vslideup_mask_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -80,7 +80,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vslideup_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -101,7 +101,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -116,7 +116,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vslideup_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -130,7 +130,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vslideup_mask_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -151,7 +151,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vslideup_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -172,7 +172,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -187,7 +187,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vslideup_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -201,7 +201,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vslideup_mask_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -222,7 +222,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vslideup_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -243,7 +243,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -258,7 +258,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vslideup_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -272,7 +272,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vslideup_mask_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -293,7 +293,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vslideup_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -314,7 +314,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -329,7 +329,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vslideup_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -343,7 +343,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vslideup_mask_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -364,7 +364,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vslideup_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -385,7 +385,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -400,7 +400,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vslideup_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -414,7 +414,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vslideup_mask_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -435,7 +435,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vslideup_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -456,7 +456,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -471,7 +471,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vslideup_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -485,7 +485,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vslideup_mask_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -506,7 +506,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vslideup_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -527,7 +527,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -542,7 +542,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vslideup_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -556,7 +556,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vslideup_mask_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -577,7 +577,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vslideup_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -598,7 +598,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -613,7 +613,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vslideup_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -627,7 +627,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vslideup_mask_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -648,7 +648,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vslideup_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -669,7 +669,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -684,7 +684,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vslideup_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -698,7 +698,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vslideup_mask_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -719,7 +719,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vslideup_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -740,7 +740,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -755,7 +755,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vslideup_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -769,7 +769,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vslideup_mask_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -790,7 +790,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vslideup_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -811,7 +811,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -826,7 +826,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vslideup_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -840,7 +840,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vslideup_mask_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -861,7 +861,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vslideup_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -882,7 +882,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -897,7 +897,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vslideup_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -911,7 +911,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vslideup_mask_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -932,7 +932,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vslideup_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -953,7 +953,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -968,7 +968,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vslideup_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -982,7 +982,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vslideup_mask_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1003,7 +1003,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vslideup_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1024,7 +1024,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1039,7 +1039,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vslideup_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1053,7 +1053,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vslideup_mask_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1074,7 +1074,7 @@ declare <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vslideup_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1095,7 +1095,7 @@ declare <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1110,7 +1110,7 @@ entry:
 define <vscale x 1 x half> @intrinsic_vslideup_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1124,7 +1124,7 @@ entry:
 define <vscale x 1 x half> @intrinsic_vslideup_mask_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1145,7 +1145,7 @@ declare <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vslideup_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1166,7 +1166,7 @@ declare <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1181,7 +1181,7 @@ entry:
 define <vscale x 2 x half> @intrinsic_vslideup_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1195,7 +1195,7 @@ entry:
 define <vscale x 2 x half> @intrinsic_vslideup_mask_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1216,7 +1216,7 @@ declare <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vslideup_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1237,7 +1237,7 @@ declare <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1252,7 +1252,7 @@ entry:
 define <vscale x 4 x half> @intrinsic_vslideup_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1266,7 +1266,7 @@ entry:
 define <vscale x 4 x half> @intrinsic_vslideup_mask_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1287,7 +1287,7 @@ declare <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vslideup_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1323,7 +1323,7 @@ entry:
 define <vscale x 8 x half> @intrinsic_vslideup_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1337,7 +1337,7 @@ entry:
 define <vscale x 8 x half> @intrinsic_vslideup_mask_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1358,7 +1358,7 @@ declare <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vslideup_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1379,7 +1379,7 @@ declare <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1394,7 +1394,7 @@ entry:
 define <vscale x 16 x half> @intrinsic_vslideup_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1408,7 +1408,7 @@ entry:
 define <vscale x 16 x half> @intrinsic_vslideup_mask_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1429,7 +1429,7 @@ declare <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vslideup_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1450,7 +1450,7 @@ declare <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1465,7 +1465,7 @@ entry:
 define <vscale x 1 x float> @intrinsic_vslideup_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1479,7 +1479,7 @@ entry:
 define <vscale x 1 x float> @intrinsic_vslideup_mask_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1500,7 +1500,7 @@ declare <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vslideup_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1521,7 +1521,7 @@ declare <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1536,7 +1536,7 @@ entry:
 define <vscale x 2 x float> @intrinsic_vslideup_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1550,7 +1550,7 @@ entry:
 define <vscale x 2 x float> @intrinsic_vslideup_mask_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1571,7 +1571,7 @@ declare <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vslideup_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
     <vscale x 4 x float> %0,
@@ -1592,7 +1592,7 @@ declare <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -1607,7 +1607,7 @@ entry:
 define <vscale x 4 x float> @intrinsic_vslideup_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
     <vscale x 4 x float> %0,
@@ -1621,7 +1621,7 @@ entry:
 define <vscale x 4 x float> @intrinsic_vslideup_mask_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -1642,7 +1642,7 @@ declare <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vslideup_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
     <vscale x 8 x float> %0,
@@ -1663,7 +1663,7 @@ declare <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -1678,7 +1678,7 @@ entry:
 define <vscale x 8 x float> @intrinsic_vslideup_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
     <vscale x 8 x float> %0,
@@ -1692,7 +1692,7 @@ entry:
 define <vscale x 8 x float> @intrinsic_vslideup_mask_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
     <vscale x 8 x float> %0,

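For reference, a minimal, hypothetical IR sketch of the pattern these tests pin down (the function and value names are illustrative and not part of this patch; the declaration matches the ones in the tests above). The first vector operand of the vslideup intrinsics is the tied destination, so with this change the generated vsetvli is expected to use tu,mu, keeping the elements of %dst at indices >= vl undisturbed, as the updated CHECK lines verify:

declare <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i64,
  i64)

define <vscale x 1 x i8> @sketch_slideup(<vscale x 1 x i8> %dst, <vscale x 1 x i8> %src, i64 %off, i64 %vl) nounwind {
entry:
  ; %dst is tied to the result, so its tail elements past vl are
  ; now preserved by the vslideup.vx emitted for this call.
  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
    <vscale x 1 x i8> %dst,
    <vscale x 1 x i8> %src,
    i64 %off,
    i64 %vl)
  ret <vscale x 1 x i8> %a
}
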
diff --git a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll
index 74b652f41be9..da11c56a0370 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll
@@ -9,7 +9,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vslideup_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -30,7 +30,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -45,7 +45,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vslideup_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -59,7 +59,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vslideup_mask_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -80,7 +80,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vslideup_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -101,7 +101,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -116,7 +116,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vslideup_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -130,7 +130,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vslideup_mask_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -151,7 +151,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vslideup_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -172,7 +172,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -187,7 +187,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vslideup_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -201,7 +201,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vslideup_mask_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -222,7 +222,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vslideup_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -243,7 +243,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -258,7 +258,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vslideup_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -272,7 +272,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vslideup_mask_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -293,7 +293,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vslideup_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -314,7 +314,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -329,7 +329,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vslideup_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -343,7 +343,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vslideup_mask_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -364,7 +364,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vslideup_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -385,7 +385,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -400,7 +400,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vslideup_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -414,7 +414,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vslideup_mask_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -435,7 +435,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vslideup_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -456,7 +456,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -471,7 +471,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vslideup_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -485,7 +485,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vslideup_mask_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -506,7 +506,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vslideup_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -527,7 +527,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -542,7 +542,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vslideup_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -556,7 +556,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vslideup_mask_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -577,7 +577,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vslideup_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -598,7 +598,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -613,7 +613,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vslideup_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -627,7 +627,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vslideup_mask_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -648,7 +648,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vslideup_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -669,7 +669,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -684,7 +684,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vslideup_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -698,7 +698,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vslideup_mask_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -719,7 +719,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vslideup_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -740,7 +740,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -755,7 +755,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vslideup_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -769,7 +769,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vslideup_mask_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -790,7 +790,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vslideup_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -811,7 +811,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -826,7 +826,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vslideup_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -840,7 +840,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vslideup_mask_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -861,7 +861,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vslideup_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -882,7 +882,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -897,7 +897,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vslideup_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -911,7 +911,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vslideup_mask_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -932,7 +932,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vslideup_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -953,7 +953,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -968,7 +968,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vslideup_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -982,7 +982,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vslideup_mask_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1003,7 +1003,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vslideup_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1024,7 +1024,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1039,7 +1039,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vslideup_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1053,7 +1053,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vslideup_mask_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1074,7 +1074,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vslideup_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1095,7 +1095,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1110,7 +1110,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vslideup_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1124,7 +1124,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vslideup_mask_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1145,7 +1145,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vslideup_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1166,7 +1166,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1181,7 +1181,7 @@ entry:
 define <vscale x 2 x i64> @intrinsic_vslideup_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1195,7 +1195,7 @@ entry:
 define <vscale x 2 x i64> @intrinsic_vslideup_mask_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1216,7 +1216,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vslideup_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1237,7 +1237,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1252,7 +1252,7 @@ entry:
 define <vscale x 4 x i64> @intrinsic_vslideup_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1266,7 +1266,7 @@ entry:
 define <vscale x 4 x i64> @intrinsic_vslideup_mask_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1287,7 +1287,7 @@ declare <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vslideup_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
 define <vscale x 1 x half> @intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1323,7 +1323,7 @@ entry:
 define <vscale x 1 x half> @intrinsic_vslideup_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1337,7 +1337,7 @@ entry:
 define <vscale x 1 x half> @intrinsic_vslideup_mask_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1358,7 +1358,7 @@ declare <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vslideup_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1379,7 +1379,7 @@ declare <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
 define <vscale x 2 x half> @intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1394,7 +1394,7 @@ entry:
 define <vscale x 2 x half> @intrinsic_vslideup_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1408,7 +1408,7 @@ entry:
 define <vscale x 2 x half> @intrinsic_vslideup_mask_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1429,7 +1429,7 @@ declare <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vslideup_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1450,7 +1450,7 @@ declare <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1465,7 +1465,7 @@ entry:
 define <vscale x 4 x half> @intrinsic_vslideup_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1479,7 +1479,7 @@ entry:
 define <vscale x 4 x half> @intrinsic_vslideup_mask_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1500,7 +1500,7 @@ declare <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vslideup_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1521,7 +1521,7 @@ declare <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
 define <vscale x 8 x half> @intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1536,7 +1536,7 @@ entry:
 define <vscale x 8 x half> @intrinsic_vslideup_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1550,7 +1550,7 @@ entry:
 define <vscale x 8 x half> @intrinsic_vslideup_mask_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1571,7 +1571,7 @@ declare <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vslideup_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1592,7 +1592,7 @@ declare <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
 define <vscale x 16 x half> @intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1607,7 +1607,7 @@ entry:
 define <vscale x 16 x half> @intrinsic_vslideup_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1621,7 +1621,7 @@ entry:
 define <vscale x 16 x half> @intrinsic_vslideup_mask_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1642,7 +1642,7 @@ declare <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vslideup_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1663,7 +1663,7 @@ declare <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
 define <vscale x 1 x float> @intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1678,7 +1678,7 @@ entry:
 define <vscale x 1 x float> @intrinsic_vslideup_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1692,7 +1692,7 @@ entry:
 define <vscale x 1 x float> @intrinsic_vslideup_mask_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1713,7 +1713,7 @@ declare <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vslideup_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1734,7 +1734,7 @@ declare <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1749,7 +1749,7 @@ entry:
 define <vscale x 2 x float> @intrinsic_vslideup_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1763,7 +1763,7 @@ entry:
 define <vscale x 2 x float> @intrinsic_vslideup_mask_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1784,7 +1784,7 @@ declare <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vslideup_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
     <vscale x 4 x float> %0,
@@ -1805,7 +1805,7 @@ declare <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
 define <vscale x 4 x float> @intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -1820,7 +1820,7 @@ entry:
 define <vscale x 4 x float> @intrinsic_vslideup_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
     <vscale x 4 x float> %0,
@@ -1834,7 +1834,7 @@ entry:
 define <vscale x 4 x float> @intrinsic_vslideup_mask_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -1855,7 +1855,7 @@ declare <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vslideup_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
     <vscale x 8 x float> %0,
@@ -1876,7 +1876,7 @@ declare <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
 define <vscale x 8 x float> @intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -1891,7 +1891,7 @@ entry:
 define <vscale x 8 x float> @intrinsic_vslideup_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
     <vscale x 8 x float> %0,
@@ -1905,7 +1905,7 @@ entry:
 define <vscale x 8 x float> @intrinsic_vslideup_mask_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -1926,7 +1926,7 @@ declare <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vslideup_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f64_nxv1f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64(
     <vscale x 1 x double> %0,
@@ -1947,7 +1947,7 @@ declare <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -1962,7 +1962,7 @@ entry:
 define <vscale x 1 x double> @intrinsic_vslideup_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f64_nxv1f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64(
     <vscale x 1 x double> %0,
@@ -1976,7 +1976,7 @@ entry:
 define <vscale x 1 x double> @intrinsic_vslideup_mask_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f64_nxv1f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -1997,7 +1997,7 @@ declare <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64(
 define <vscale x 2 x double> @intrinsic_vslideup_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f64_nxv2f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2018,7 +2018,7 @@ declare <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64(
 define <vscale x 2 x double> @intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2033,7 +2033,7 @@ entry:
 define <vscale x 2 x double> @intrinsic_vslideup_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f64_nxv2f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2047,7 +2047,7 @@ entry:
 define <vscale x 2 x double> @intrinsic_vslideup_mask_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f64_nxv2f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2068,7 +2068,7 @@ declare <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64(
 define <vscale x 4 x double> @intrinsic_vslideup_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f64_nxv4f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64(
     <vscale x 4 x double> %0,
@@ -2089,7 +2089,7 @@ declare <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64(
 define <vscale x 4 x double> @intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -2104,7 +2104,7 @@ entry:
 define <vscale x 4 x double> @intrinsic_vslideup_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f64_nxv4f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64(
     <vscale x 4 x double> %0,
@@ -2118,7 +2118,7 @@ entry:
 define <vscale x 4 x double> @intrinsic_vslideup_mask_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f64_nxv4f64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64(
     <vscale x 4 x double> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll
index 67f18ec9378d..205f16bc9b9b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vsll_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vsll_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vsll_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vsll_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vsll_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vsll_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vsll_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vsll_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vsll_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vsll_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vsll_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vsll_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vsll_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vsll_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vsll_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vsll_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vsll_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vsll_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1456,7 +1456,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1484,7 +1484,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vsll_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1512,7 +1512,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vsll_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1540,7 +1540,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1568,7 +1568,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1596,7 +1596,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1624,7 +1624,7 @@ entry:
 define <vscale x 64 x i8> @intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1652,7 +1652,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vsll_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1680,7 +1680,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vsll_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1708,7 +1708,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1736,7 +1736,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1764,7 +1764,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1792,7 +1792,7 @@ entry:
 define <vscale x 32 x i16> @intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1820,7 +1820,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vsll_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1848,7 +1848,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1876,7 +1876,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1904,7 +1904,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1932,7 +1932,7 @@ entry:
 define <vscale x 16 x i32> @intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
index bbf799fb8d5b..201c90599917 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vsll_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vsll_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vsll_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vsll_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vsll_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vsll_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vsll_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vsll_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vsll_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vsll_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vsll_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vsll_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vsll_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vsll_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vsll_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vsll_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vsll_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vsll_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vsll_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vsll_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vsll_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vsll_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vsll_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vsll_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vsll_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1776,7 +1776,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1804,7 +1804,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vsll_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1832,7 +1832,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vsll_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1860,7 +1860,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1888,7 +1888,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1916,7 +1916,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1944,7 +1944,7 @@ entry:
 define <vscale x 64 x i8> @intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1972,7 +1972,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vsll_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2000,7 +2000,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vsll_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2028,7 +2028,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2056,7 +2056,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2084,7 +2084,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2112,7 +2112,7 @@ entry:
 define <vscale x 32 x i16> @intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2140,7 +2140,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vsll_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2168,7 +2168,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2196,7 +2196,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2224,7 +2224,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2252,7 +2252,7 @@ entry:
 define <vscale x 16 x i32> @intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2280,7 +2280,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vsll_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2308,7 +2308,7 @@ entry:
 define <vscale x 2 x i64> @intrinsic_vsll_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2336,7 +2336,7 @@ entry:
 define <vscale x 4 x i64> @intrinsic_vsll_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2364,7 +2364,7 @@ entry:
 define <vscale x 8 x i64> @intrinsic_vsll_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
index fb9fc7c90876..1fa0eea13e57 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

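The rv64 copy of the vsmul tests below repeats the change for i64 element
types and for the .vx scalar-operand form. A sketch of the vx variant
(hypothetical function name, signature taken from the declarations in the
hunks) shows the scalar path picking up the identical tu policy:

declare <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64(
  <vscale x 1 x i64>, <vscale x 1 x i64>, i64,
  <vscale x 1 x i1>, i64)

define <vscale x 1 x i64> @sketch_masked_vsmul_vx(<vscale x 1 x i64> %merge, <vscale x 1 x i64> %a, i64 %b, <vscale x 1 x i1> %mask, i64 %vl) nounwind {
entry:
; expected codegen:
;   vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
;   vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
  %r = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64(
    <vscale x 1 x i64> %merge,
    <vscale x 1 x i64> %a,
    i64 %b,
    <vscale x 1 x i1> %mask,
    i64 %vl)
  ret <vscale x 1 x i64> %r
}
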
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
index c418e428ef0d..2b63398a7898 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

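The vsra tests below follow the same pattern. Note that the hunks only touch
the masked tests; the unmasked intrinsics in these files carry no merge
operand, so there is no tied source whose tail the caller could observe, and
they presumably keep the tail-agnostic encoding. A sketch under that
assumption (hypothetical function name, unmasked signature inferred from the
masked declarations):

declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
  <vscale x 1 x i8>, <vscale x 1 x i8>, i32)

define <vscale x 1 x i8> @sketch_unmasked_vsra(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, i32 %vl) nounwind {
entry:
; expected codegen (assumed unchanged by this patch):
;   vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
;   vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
  %r = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %a,
    <vscale x 1 x i8> %b,
    i32 %vl)
  ret <vscale x 1 x i8> %r
}
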
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
index 24697ab6b8e7..64faa40d2004 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vsra_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vsra_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vsra_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vsra_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vsra_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vsra_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vsra_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vsra_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vsra_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vsra_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vsra_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vsra_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vsra_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vsra_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vsra_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vsra_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vsra_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vsra_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vsra_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vsra_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vsra_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vsra_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vsra_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vsra_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vsra_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vsra_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vsra_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vsra_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vsra_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vsra_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vsra_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vsra_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vsra_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1456,7 +1456,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vsra_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1484,7 +1484,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vsra_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1512,7 +1512,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vsra_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1540,7 +1540,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vsra_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1568,7 +1568,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vsra_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1596,7 +1596,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vsra_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1624,7 +1624,7 @@ entry:
 define <vscale x 64 x i8> @intrinsic_vsra_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1652,7 +1652,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vsra_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1680,7 +1680,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vsra_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1708,7 +1708,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vsra_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1736,7 +1736,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vsra_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1764,7 +1764,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vsra_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1792,7 +1792,7 @@ entry:
 define <vscale x 32 x i16> @intrinsic_vsra_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1820,7 +1820,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vsra_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1848,7 +1848,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vsra_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1876,7 +1876,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vsra_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1904,7 +1904,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vsra_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1932,7 +1932,7 @@ entry:
 define <vscale x 16 x i32> @intrinsic_vsra_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
index 5a2d9e6935a8..46e66deea484 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vsra_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vsra_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vsra_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vsra_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vsra_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vsra_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vsra_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vsra_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vsra_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vsra_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vsra_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vsra_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vsra_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vsra_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vsra_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vsra_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vsra_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vsra_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vsra_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vsra_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vsra_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vsra_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vsra_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vsra_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vsra_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vsra_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vsra_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vsra_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vsra_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vsra_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vsra_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vsra_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vsra_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vsra_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vsra_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vsra_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vsra_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vsra_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vsra_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vsra_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1776,7 +1776,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vsra_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1804,7 +1804,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vsra_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1832,7 +1832,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vsra_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1860,7 +1860,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vsra_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1888,7 +1888,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vsra_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1916,7 +1916,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vsra_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1944,7 +1944,7 @@ entry:
 define <vscale x 64 x i8> @intrinsic_vsra_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1972,7 +1972,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vsra_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2000,7 +2000,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vsra_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2028,7 +2028,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vsra_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2056,7 +2056,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vsra_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2084,7 +2084,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vsra_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2112,7 +2112,7 @@ entry:
 define <vscale x 32 x i16> @intrinsic_vsra_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2140,7 +2140,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vsra_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2168,7 +2168,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vsra_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2196,7 +2196,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vsra_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2224,7 +2224,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vsra_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2252,7 +2252,7 @@ entry:
 define <vscale x 16 x i32> @intrinsic_vsra_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2280,7 +2280,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vsra_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2308,7 +2308,7 @@ entry:
 define <vscale x 2 x i64> @intrinsic_vsra_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2336,7 +2336,7 @@ entry:
 define <vscale x 4 x i64> @intrinsic_vsra_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2364,7 +2364,7 @@ entry:
 define <vscale x 8 x i64> @intrinsic_vsra_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
index 514c3d31dba1..2067ea38c143 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vsrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vsrl_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vsrl_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vsrl_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vsrl_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vsrl_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vsrl_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vsrl_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vsrl_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vsrl_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vsrl_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vsrl_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vsrl_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vsrl_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vsrl_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1456,7 +1456,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vsrl_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1484,7 +1484,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vsrl_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1512,7 +1512,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vsrl_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1540,7 +1540,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vsrl_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1568,7 +1568,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vsrl_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1596,7 +1596,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vsrl_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1624,7 +1624,7 @@ entry:
 define <vscale x 64 x i8> @intrinsic_vsrl_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1652,7 +1652,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vsrl_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1680,7 +1680,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vsrl_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1708,7 +1708,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vsrl_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1736,7 +1736,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vsrl_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1764,7 +1764,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vsrl_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1792,7 +1792,7 @@ entry:
 define <vscale x 32 x i16> @intrinsic_vsrl_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1820,7 +1820,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vsrl_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1848,7 +1848,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vsrl_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1876,7 +1876,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vsrl_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1904,7 +1904,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vsrl_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1932,7 +1932,7 @@ entry:
 define <vscale x 16 x i32> @intrinsic_vsrl_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
index 9598eed618b0..4afedd7df0d2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vsrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vsrl_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vsrl_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vsrl_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vsrl_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vsrl_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vsrl_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vsrl_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vsrl_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vsrl_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vsrl_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vsrl_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vsrl_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vsrl_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vsrl_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vsrl_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vsrl_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vsrl_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1776,7 +1776,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vsrl_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1804,7 +1804,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vsrl_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1832,7 +1832,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vsrl_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1860,7 +1860,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vsrl_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1888,7 +1888,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vsrl_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1916,7 +1916,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vsrl_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1944,7 +1944,7 @@ entry:
 define <vscale x 64 x i8> @intrinsic_vsrl_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1972,7 +1972,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vsrl_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2000,7 +2000,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vsrl_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2028,7 +2028,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vsrl_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2056,7 +2056,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vsrl_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2084,7 +2084,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vsrl_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2112,7 +2112,7 @@ entry:
 define <vscale x 32 x i16> @intrinsic_vsrl_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2140,7 +2140,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vsrl_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2168,7 +2168,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vsrl_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2196,7 +2196,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vsrl_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2224,7 +2224,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vsrl_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2252,7 +2252,7 @@ entry:
 define <vscale x 16 x i32> @intrinsic_vsrl_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2280,7 +2280,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vsrl_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2308,7 +2308,7 @@ entry:
 define <vscale x 2 x i64> @intrinsic_vsrl_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2336,7 +2336,7 @@ entry:
 define <vscale x 4 x i64> @intrinsic_vsrl_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2364,7 +2364,7 @@ entry:
 define <vscale x 8 x i64> @intrinsic_vsrl_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll
index 7cb0e4e5cbd5..ad94faac73b4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vssra_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vssra_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vssra_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vssra_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vssra_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vssra_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vssra_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vssra_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vssra_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vssra_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vssra_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vssra_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vssra_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vssra_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vssra_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vssra_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vssra_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vssra_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vssra_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vssra_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vssra_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vssra_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vssra_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vssra_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vssra_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vssra_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vssra_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vssra_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vssra_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vssra_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vssra_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vssra_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vssra_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1456,7 +1456,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vssra_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1484,7 +1484,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vssra_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1512,7 +1512,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vssra_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1540,7 +1540,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vssra_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1568,7 +1568,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vssra_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1596,7 +1596,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vssra_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1624,7 +1624,7 @@ entry:
 define <vscale x 64 x i8> @intrinsic_vssra_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1652,7 +1652,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vssra_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1680,7 +1680,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vssra_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1708,7 +1708,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vssra_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1736,7 +1736,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vssra_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1764,7 +1764,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vssra_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1792,7 +1792,7 @@ entry:
 define <vscale x 32 x i16> @intrinsic_vssra_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1820,7 +1820,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vssra_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1848,7 +1848,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vssra_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1876,7 +1876,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vssra_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1904,7 +1904,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vssra_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1932,7 +1932,7 @@ entry:
 define <vscale x 16 x i32> @intrinsic_vssra_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
index 044dc7f7df0f..b00271b71735 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vssra_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vssra_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vssra_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vssra_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vssra_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vssra_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vssra_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vssra_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vssra_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vssra_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vssra_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vssra_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vssra_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vssra_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vssra_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vssra_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vssra_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vssra_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vssra_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vssra_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vssra_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vssra_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vssra_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vssra_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vssra_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vssra_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vssra_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vssra_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vssra_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vssra_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vssra_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vssra_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vssra_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vssra_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vssra_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vssra_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vssra_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vssra_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vssra_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vssra_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vssra_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1776,7 +1776,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vssra_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1804,7 +1804,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vssra_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1832,7 +1832,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vssra_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1860,7 +1860,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vssra_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1888,7 +1888,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vssra_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1916,7 +1916,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vssra_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1944,7 +1944,7 @@ entry:
 define <vscale x 64 x i8> @intrinsic_vssra_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1972,7 +1972,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vssra_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2000,7 +2000,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vssra_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2028,7 +2028,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vssra_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2056,7 +2056,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vssra_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2084,7 +2084,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vssra_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2112,7 +2112,7 @@ entry:
 define <vscale x 32 x i16> @intrinsic_vssra_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2140,7 +2140,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vssra_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2168,7 +2168,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vssra_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2196,7 +2196,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vssra_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2224,7 +2224,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vssra_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2252,7 +2252,7 @@ entry:
 define <vscale x 16 x i32> @intrinsic_vssra_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2280,7 +2280,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vssra_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2308,7 +2308,7 @@ entry:
 define <vscale x 2 x i64> @intrinsic_vssra_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2336,7 +2336,7 @@ entry:
 define <vscale x 4 x i64> @intrinsic_vssra_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2364,7 +2364,7 @@ entry:
 define <vscale x 8 x i64> @intrinsic_vssra_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
index aa69e0d2cea4..b62040bac0a2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vssrl_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vssrl_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vssrl_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vssrl_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vssrl_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vssrl_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vssrl_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vssrl_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vssrl_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vssrl_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vssrl_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vssrl_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vssrl_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vssrl_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1456,7 +1456,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vssrl_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1484,7 +1484,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vssrl_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1512,7 +1512,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vssrl_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1540,7 +1540,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vssrl_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1568,7 +1568,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vssrl_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1596,7 +1596,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vssrl_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1624,7 +1624,7 @@ entry:
 define <vscale x 64 x i8> @intrinsic_vssrl_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1652,7 +1652,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vssrl_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1680,7 +1680,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vssrl_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1708,7 +1708,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vssrl_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1736,7 +1736,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vssrl_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1764,7 +1764,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vssrl_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1792,7 +1792,7 @@ entry:
 define <vscale x 32 x i16> @intrinsic_vssrl_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1820,7 +1820,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vssrl_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1848,7 +1848,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vssrl_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1876,7 +1876,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vssrl_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1904,7 +1904,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vssrl_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1932,7 +1932,7 @@ entry:
 define <vscale x 16 x i32> @intrinsic_vssrl_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll
index bf797f6d38ab..d744e15991c2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vssrl_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vssrl_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vssrl_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vssrl_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vssrl_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vssrl_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vssrl_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vssrl_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vssrl_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vssrl_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vssrl_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vssrl_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vssrl_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vssrl_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vssrl_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vssrl_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vssrl_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vssrl_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1776,7 +1776,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vssrl_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1804,7 +1804,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vssrl_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1832,7 +1832,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vssrl_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1860,7 +1860,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vssrl_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1888,7 +1888,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vssrl_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1916,7 +1916,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vssrl_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1944,7 +1944,7 @@ entry:
 define <vscale x 64 x i8> @intrinsic_vssrl_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1972,7 +1972,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vssrl_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2000,7 +2000,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vssrl_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2028,7 +2028,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vssrl_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2056,7 +2056,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vssrl_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2084,7 +2084,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vssrl_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2112,7 +2112,7 @@ entry:
 define <vscale x 32 x i16> @intrinsic_vssrl_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2140,7 +2140,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vssrl_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2168,7 +2168,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vssrl_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2196,7 +2196,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vssrl_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2224,7 +2224,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vssrl_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2252,7 +2252,7 @@ entry:
 define <vscale x 16 x i32> @intrinsic_vssrl_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2280,7 +2280,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vssrl_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2308,7 +2308,7 @@ entry:
 define <vscale x 2 x i64> @intrinsic_vssrl_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2336,7 +2336,7 @@ entry:
 define <vscale x 4 x i64> @intrinsic_vssrl_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2364,7 +2364,7 @@ entry:
 define <vscale x 8 x i64> @intrinsic_vssrl_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll
index bed91d57262d..37214cf63049 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

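For readers skimming the repeated hunks: every masked-intrinsic test in these files changes only the expected vsetvli policy from tail agnostic (ta,mu) to tail undisturbed (tu,mu); the vssub.vv/vssub.vx operation and its v0.t masking are unchanged. As a minimal sketch of the IR shape these CHECK lines correspond to (hypothetical names @sketch, %merge, %lhs, %rhs, %mask, %vl; the intrinsic declaration itself is taken verbatim from the tests above, with the i32 vl of the rv32 variant):

  ; Illustrative only; mirrors intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8.
  declare <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8>,   ; merge value (the tied destination, %0 in the tests)
    <vscale x 1 x i8>,   ; lhs
    <vscale x 1 x i8>,   ; rhs
    <vscale x 1 x i1>,   ; mask
    i32)                 ; vl

  define <vscale x 1 x i8> @sketch(<vscale x 1 x i8> %merge, <vscale x 1 x i8> %lhs,
                                   <vscale x 1 x i8> %rhs, <vscale x 1 x i1> %mask,
                                   i32 %vl) nounwind {
    ; Expected lowering per the updated CHECK lines:
    ;   vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
    ;   vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
    %a = call <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.nxv1i8(
      <vscale x 1 x i8> %merge,
      <vscale x 1 x i8> %lhs,
      <vscale x 1 x i8> %rhs,
      <vscale x 1 x i1> %mask,
      i32 %vl)
    ret <vscale x 1 x i8> %a
  }

Because %merge doubles as the tied destination register, the tu policy is what keeps its tail elements readable after the masked operation; under ta they would be unspecified.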
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll
index 0170f6ce7b01..ea1027ad95c7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vssub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vssub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vssub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vssub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vssub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vssub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vssub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll
index 5892b649a97c..6224bbad87df 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

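Every hunk in these test files follows the same pattern: the masked
intrinsic ties its destination to the passthru operand %0, so the
expected vsetvli flips from tail agnostic (ta) to tail undisturbed (tu)
while the instruction CHECK line is unchanged. A minimal self-contained
sketch of the updated test shape (the function name below is
illustrative and does not appear in the patch):

declare <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  i32)

define <vscale x 1 x i8> @example_masked_tied_dest(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; The result is tied to %0, so the tail elements of %0 must be preserved:
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
  %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    i32 %4)
  ret <vscale x 1 x i8> %a
}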
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll
index 2a8c5d6976f8..d5a1bfcccc7d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vssubu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vssubu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vssubu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vssubu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vssubu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vssubu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vssubu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
index 1d005ecbe46c..66f84edeede9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
index 6e1a22e4aff0..e8448d265aba 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vsub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vsub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vsub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

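Every hunk in the vsub tests above makes the same one-character change to the expected vsetvli: for the masked forms, the tail policy flips from ta (tail agnostic) to tu (tail undisturbed), while the mask policy stays mu. These masked intrinsics tie the destination register to their first operand, so under tu the lanes past vl are carried over from that merge operand, whereas under ta the hardware may either keep or overwrite them. A minimal sketch of the pattern, not part of the patch (function and value names are illustrative; the intrinsic name and signature follow the declarations and call sites in the hunks above):

declare <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,  ; merge value, tied to the destination register
  <vscale x 4 x i8>,  ; op1
  <vscale x 4 x i8>,  ; op2
  <vscale x 4 x i1>,  ; mask
  i64)                ; vl

define <vscale x 4 x i8> @sketch_tied_tail(<vscale x 4 x i8> %merge, <vscale x 4 x i8> %x, <vscale x 4 x i8> %y, <vscale x 4 x i1> %m, i64 %vl) nounwind {
entry:
  ; Active lanes get %x - %y; inactive lanes keep %merge (mask undisturbed).
  ; Under tu the lanes past %vl also keep %merge; under ta the hardware may
  ; either keep them or fill them with all ones.
  %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %merge,
    <vscale x 4 x i8> %x,
    <vscale x 4 x i8> %y,
    <vscale x 4 x i1> %m,
    i64 %vl)
  ret <vscale x 4 x i8> %a
}
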
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll
index 9f15de98d599..b3bb09504555 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
 define <vscale x 1 x i16> @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8(
 define <vscale x 2 x i16> @intrinsic_vwadd_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i16_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vwadd_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i16_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8(
 define <vscale x 8 x i16> @intrinsic_vwadd_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i16_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8(
 define <vscale x 16 x i16> @intrinsic_vwadd_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv16i16_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vwadd_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv32i16_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16(
 define <vscale x 1 x i32> @intrinsic_vwadd_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i32_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vwadd_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i32_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16(
 define <vscale x 4 x i32> @intrinsic_vwadd_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i32_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16(
 define <vscale x 8 x i32> @intrinsic_vwadd_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i32_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16(
 define <vscale x 16 x i32> @intrinsic_vwadd_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv16i32_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8(
 define <vscale x 1 x i16> @intrinsic_vwadd_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i16_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8(
 define <vscale x 2 x i16> @intrinsic_vwadd_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i16_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8(
 define <vscale x 4 x i16> @intrinsic_vwadd_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i16_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8(
 define <vscale x 8 x i16> @intrinsic_vwadd_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i16_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -628,7 +628,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8(
 define <vscale x 16 x i16> @intrinsic_vwadd_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv16i16_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -668,7 +668,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8(
 define <vscale x 32 x i16> @intrinsic_vwadd_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv32i16_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -708,7 +708,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16(
 define <vscale x 1 x i32> @intrinsic_vwadd_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i32_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16(
 define <vscale x 2 x i32> @intrinsic_vwadd_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i32_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -788,7 +788,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16(
 define <vscale x 4 x i32> @intrinsic_vwadd_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i32_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -828,7 +828,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16(
 define <vscale x 8 x i32> @intrinsic_vwadd_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i32_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -868,7 +868,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16(
 define <vscale x 16 x i32> @intrinsic_vwadd_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv16i32_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,

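The vwadd hunks apply the same flip to a widening op: the destination element type is twice the width of the sources, the vsetvli in the checks still names the narrow source type (e8 or e16), and only the tail policy changes. A sketch under the same caveats as above (names illustrative; signature taken from the declarations in this file; note the i32 vl on rv32):

declare <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
  <vscale x 1 x i16>,  ; wide merge value, tied to the destination
  <vscale x 1 x i8>,   ; narrow op1
  <vscale x 1 x i8>,   ; narrow op2
  <vscale x 1 x i1>,   ; mask
  i32)                 ; vl (i32 on rv32)

define <vscale x 1 x i16> @sketch_widening_tail(<vscale x 1 x i16> %merge, <vscale x 1 x i8> %x, <vscale x 1 x i8> %y, <vscale x 1 x i1> %m, i32 %vl) nounwind {
entry:
  ; Per the updated checks, this should lower to:
  ;   vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
  ;   vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i16> %merge,
    <vscale x 1 x i8> %x,
    <vscale x 1 x i8> %y,
    <vscale x 1 x i1> %m,
    i32 %vl)
  ret <vscale x 1 x i16> %a
}
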
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll
index a65ea3a5dfae..7cdc5a521d44 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
 define <vscale x 1 x i16> @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8(
 define <vscale x 2 x i16> @intrinsic_vwadd_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i16_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vwadd_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i16_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8(
 define <vscale x 8 x i16> @intrinsic_vwadd_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i16_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8(
 define <vscale x 16 x i16> @intrinsic_vwadd_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv16i16_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vwadd_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv32i16_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16(
 define <vscale x 1 x i32> @intrinsic_vwadd_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i32_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vwadd_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i32_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16(
 define <vscale x 4 x i32> @intrinsic_vwadd_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i32_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16(
 define <vscale x 8 x i32> @intrinsic_vwadd_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i32_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16(
 define <vscale x 16 x i32> @intrinsic_vwadd_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv16i32_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32(
 define <vscale x 1 x i64> @intrinsic_vwadd_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i64_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32(
 define <vscale x 2 x i64> @intrinsic_vwadd_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i64_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32(
 define <vscale x 4 x i64> @intrinsic_vwadd_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i64_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32(
 define <vscale x 8 x i64> @intrinsic_vwadd_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i64_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -628,7 +628,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8(
 define <vscale x 1 x i16> @intrinsic_vwadd_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i16_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -668,7 +668,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8(
 define <vscale x 2 x i16> @intrinsic_vwadd_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i16_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -708,7 +708,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8(
 define <vscale x 4 x i16> @intrinsic_vwadd_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i16_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -748,7 +748,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8(
 define <vscale x 8 x i16> @intrinsic_vwadd_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i16_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -788,7 +788,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8(
 define <vscale x 16 x i16> @intrinsic_vwadd_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv16i16_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -828,7 +828,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8(
 define <vscale x 32 x i16> @intrinsic_vwadd_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv32i16_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -868,7 +868,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16(
 define <vscale x 1 x i32> @intrinsic_vwadd_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i32_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -908,7 +908,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16(
 define <vscale x 2 x i32> @intrinsic_vwadd_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i32_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -948,7 +948,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16(
 define <vscale x 4 x i32> @intrinsic_vwadd_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i32_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -988,7 +988,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16(
 define <vscale x 8 x i32> @intrinsic_vwadd_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i32_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16(
 define <vscale x 16 x i32> @intrinsic_vwadd_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv16i32_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32(
 define <vscale x 1 x i64> @intrinsic_vwadd_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i64_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32(
 define <vscale x 2 x i64> @intrinsic_vwadd_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i64_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32(
 define <vscale x 4 x i64> @intrinsic_vwadd_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i64_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32(
 define <vscale x 8 x i64> @intrinsic_vwadd_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i64_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,

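vwadd-rv64.ll repeats the same update with an i64 vl operand and adds the i32-to-i64 widening variants. For reference, one line of assembly that the updated patterns are written to match could look like the following (register choices illustrative):

  vsetvli a3, a2, e32,m1,tu,mu

The two wildcards in each vsetvli CHECK line leave the destination and AVL registers unconstrained; the fixed fields assert SEW=32, LMUL=1, tail undisturbed (tu), and mask undisturbed (mu).
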
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll
index ee29f5631648..b1237b5e4fbd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8(
 define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8(
 define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8(
 define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8(
 define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16(
 define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16(
 define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16(
 define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16(
 define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8(
 define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8(
 define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8(
 define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8(
 define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -628,7 +628,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8(
 define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -668,7 +668,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8(
 define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -708,7 +708,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16(
 define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16(
 define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -788,7 +788,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16(
 define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -828,7 +828,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16(
 define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -868,7 +868,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16(
 define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
index 828d32506a21..2e5bd9ce13fb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8(
 define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8(
 define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8(
 define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8(
 define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16(
 define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16(
 define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16(
 define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16(
 define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32(
 define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i64_nxv1i64_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32(
 define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i64_nxv2i64_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32(
 define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i64_nxv4i64_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32(
 define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -628,7 +628,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8(
 define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -668,7 +668,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8(
 define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -708,7 +708,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8(
 define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -748,7 +748,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8(
 define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -788,7 +788,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8(
 define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -828,7 +828,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8(
 define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -868,7 +868,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16(
 define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -908,7 +908,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16(
 define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -948,7 +948,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16(
 define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -988,7 +988,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16(
 define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16(
 define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32(
 define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i64_nxv1i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32(
 define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i64_nxv2i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32(
 define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i64_nxv4i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32(
 define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i64_nxv8i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll
index 252517297796..f76914d28513 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8(
 define <vscale x 1 x i16> @intrinsic_vwaddu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i16_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8(
 define <vscale x 2 x i16> @intrinsic_vwaddu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i16_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vwaddu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i16_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8(
 define <vscale x 8 x i16> @intrinsic_vwaddu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i16_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8(
 define <vscale x 16 x i16> @intrinsic_vwaddu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv16i16_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vwaddu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv32i16_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16(
 define <vscale x 1 x i32> @intrinsic_vwaddu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i32_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vwaddu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i32_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16(
 define <vscale x 4 x i32> @intrinsic_vwaddu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i32_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16(
 define <vscale x 8 x i32> @intrinsic_vwaddu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i32_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16(
 define <vscale x 16 x i32> @intrinsic_vwaddu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv16i32_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8(
 define <vscale x 1 x i16> @intrinsic_vwaddu_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i16_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8(
 define <vscale x 2 x i16> @intrinsic_vwaddu_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i16_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8(
 define <vscale x 4 x i16> @intrinsic_vwaddu_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i16_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8(
 define <vscale x 8 x i16> @intrinsic_vwaddu_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i16_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -628,7 +628,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8(
 define <vscale x 16 x i16> @intrinsic_vwaddu_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv16i16_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -668,7 +668,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8(
 define <vscale x 32 x i16> @intrinsic_vwaddu_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv32i16_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -708,7 +708,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16(
 define <vscale x 1 x i32> @intrinsic_vwaddu_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i32_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16(
 define <vscale x 2 x i32> @intrinsic_vwaddu_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i32_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -788,7 +788,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16(
 define <vscale x 4 x i32> @intrinsic_vwaddu_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i32_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -828,7 +828,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16(
 define <vscale x 8 x i32> @intrinsic_vwaddu_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i32_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -868,7 +868,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16(
 define <vscale x 16 x i32> @intrinsic_vwaddu_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv16i32_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll
index 631dade5b8e1..4a1228718bca 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8(
 define <vscale x 1 x i16> @intrinsic_vwaddu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i16_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8(
 define <vscale x 2 x i16> @intrinsic_vwaddu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i16_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vwaddu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i16_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8(
 define <vscale x 8 x i16> @intrinsic_vwaddu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i16_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8(
 define <vscale x 16 x i16> @intrinsic_vwaddu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv16i16_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vwaddu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv32i16_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16(
 define <vscale x 1 x i32> @intrinsic_vwaddu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i32_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vwaddu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i32_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16(
 define <vscale x 4 x i32> @intrinsic_vwaddu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i32_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16(
 define <vscale x 8 x i32> @intrinsic_vwaddu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i32_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16(
 define <vscale x 16 x i32> @intrinsic_vwaddu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv16i32_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32(
 define <vscale x 1 x i64> @intrinsic_vwaddu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i64_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32(
 define <vscale x 2 x i64> @intrinsic_vwaddu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i64_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32(
 define <vscale x 4 x i64> @intrinsic_vwaddu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i64_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32(
 define <vscale x 8 x i64> @intrinsic_vwaddu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i64_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -628,7 +628,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8(
 define <vscale x 1 x i16> @intrinsic_vwaddu_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i16_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -668,7 +668,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8(
 define <vscale x 2 x i16> @intrinsic_vwaddu_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i16_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -708,7 +708,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8(
 define <vscale x 4 x i16> @intrinsic_vwaddu_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i16_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -748,7 +748,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8(
 define <vscale x 8 x i16> @intrinsic_vwaddu_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i16_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -788,7 +788,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8(
 define <vscale x 16 x i16> @intrinsic_vwaddu_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv16i16_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -828,7 +828,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8(
 define <vscale x 32 x i16> @intrinsic_vwaddu_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv32i16_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -868,7 +868,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16(
 define <vscale x 1 x i32> @intrinsic_vwaddu_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i32_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -908,7 +908,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16(
 define <vscale x 2 x i32> @intrinsic_vwaddu_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i32_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -948,7 +948,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16(
 define <vscale x 4 x i32> @intrinsic_vwaddu_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i32_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -988,7 +988,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16(
 define <vscale x 8 x i32> @intrinsic_vwaddu_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i32_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16(
 define <vscale x 16 x i32> @intrinsic_vwaddu_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv16i32_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32(
 define <vscale x 1 x i64> @intrinsic_vwaddu_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i64_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32(
 define <vscale x 2 x i64> @intrinsic_vwaddu_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i64_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32(
 define <vscale x 4 x i64> @intrinsic_vwaddu_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i64_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32(
 define <vscale x 8 x i64> @intrinsic_vwaddu_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i64_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
index ae5241d60dae..7a435299e9c7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8(
 define <vscale x 1 x i16> @intrinsic_vwaddu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8(
 define <vscale x 2 x i16> @intrinsic_vwaddu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vwaddu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8(
 define <vscale x 8 x i16> @intrinsic_vwaddu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8(
 define <vscale x 16 x i16> @intrinsic_vwaddu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16(
 define <vscale x 1 x i32> @intrinsic_vwaddu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vwaddu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16(
 define <vscale x 4 x i32> @intrinsic_vwaddu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16(
 define <vscale x 8 x i32> @intrinsic_vwaddu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16(
 define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.i8(
 define <vscale x 1 x i16> @intrinsic_vwaddu.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i16_nxv1i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.i8(
 define <vscale x 2 x i16> @intrinsic_vwaddu.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i16_nxv2i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.i8(
 define <vscale x 4 x i16> @intrinsic_vwaddu.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i16_nxv4i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.i8(
 define <vscale x 8 x i16> @intrinsic_vwaddu.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i16_nxv8i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -628,7 +628,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.i8(
 define <vscale x 16 x i16> @intrinsic_vwaddu.w_mask_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv16i16_nxv16i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -668,7 +668,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.i8(
 define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv32i16_nxv32i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -708,7 +708,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.i16(
 define <vscale x 1 x i32> @intrinsic_vwaddu.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i32_nxv1i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.i16(
 define <vscale x 2 x i32> @intrinsic_vwaddu.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i32_nxv2i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -788,7 +788,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.i16(
 define <vscale x 4 x i32> @intrinsic_vwaddu.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i32_nxv4i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -828,7 +828,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.i16(
 define <vscale x 8 x i32> @intrinsic_vwaddu.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i32_nxv8i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -868,7 +868,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.i16(
 define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv16i32_nxv16i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
index e260842cab8a..3bab7a089ee4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8(
 define <vscale x 1 x i16> @intrinsic_vwaddu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8(
 define <vscale x 2 x i16> @intrinsic_vwaddu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vwaddu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8(
 define <vscale x 8 x i16> @intrinsic_vwaddu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8(
 define <vscale x 16 x i16> @intrinsic_vwaddu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16(
 define <vscale x 1 x i32> @intrinsic_vwaddu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vwaddu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16(
 define <vscale x 4 x i32> @intrinsic_vwaddu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16(
 define <vscale x 8 x i32> @intrinsic_vwaddu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16(
 define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32(
 define <vscale x 1 x i64> @intrinsic_vwaddu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32(
 define <vscale x 2 x i64> @intrinsic_vwaddu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32(
 define <vscale x 4 x i64> @intrinsic_vwaddu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32(
 define <vscale x 8 x i64> @intrinsic_vwaddu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -628,7 +628,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.i8(
 define <vscale x 1 x i16> @intrinsic_vwaddu.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i16_nxv1i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -668,7 +668,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.i8(
 define <vscale x 2 x i16> @intrinsic_vwaddu.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i16_nxv2i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -708,7 +708,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.i8(
 define <vscale x 4 x i16> @intrinsic_vwaddu.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i16_nxv4i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -748,7 +748,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.i8(
 define <vscale x 8 x i16> @intrinsic_vwaddu.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i16_nxv8i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -788,7 +788,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.i8(
 define <vscale x 16 x i16> @intrinsic_vwaddu.w_mask_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv16i16_nxv16i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -828,7 +828,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.i8(
 define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv32i16_nxv32i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -868,7 +868,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.i16(
 define <vscale x 1 x i32> @intrinsic_vwaddu.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i32_nxv1i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -908,7 +908,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.i16(
 define <vscale x 2 x i32> @intrinsic_vwaddu.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i32_nxv2i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -948,7 +948,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.i16(
 define <vscale x 4 x i32> @intrinsic_vwaddu.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i32_nxv4i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -988,7 +988,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.i16(
 define <vscale x 8 x i32> @intrinsic_vwaddu.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i32_nxv8i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.i16(
 define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv16i32_nxv16i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.i32(
 define <vscale x 1 x i64> @intrinsic_vwaddu.w_mask_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i64_nxv1i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.i32(
 define <vscale x 2 x i64> @intrinsic_vwaddu.w_mask_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i64_nxv2i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.i32(
 define <vscale x 4 x i64> @intrinsic_vwaddu.w_mask_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i64_nxv4i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.i32(
 define <vscale x 8 x i64> @intrinsic_vwaddu.w_mask_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i64_nxv8i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll
index 539177f8d78e..fbe54429ad91 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
 define <vscale x 1 x i16>  @intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
 define <vscale x 1 x i16>  @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8(
 define <vscale x 2 x i16>  @intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8(
 define <vscale x 2 x i16>  @intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8(
 define <vscale x 4 x i16>  @intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8(
 define <vscale x 4 x i16>  @intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8(
 define <vscale x 8 x i16>  @intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v18, v19
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8(
 define <vscale x 8 x i16>  @intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v18, v19, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8(
 define <vscale x 16 x i16>  @intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v20, v22
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8(
 define <vscale x 16 x i16>  @intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v20, v22, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -243,7 +243,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmacc_vv_nxv32i16_nxv32i8_nxv32i8(<vscal
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -269,7 +269,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmacc_mask_vv_nxv32i16_nxv32i8_nxv32i8(<
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v8, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -292,7 +292,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.nxv1i16(
 define <vscale x 1 x i32>  @intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -315,7 +315,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16(
 define <vscale x 1 x i32>  @intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -338,7 +338,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.nxv2i16(
 define <vscale x 2 x i32>  @intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -361,7 +361,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16(
 define <vscale x 2 x i32>  @intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -384,7 +384,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16(
 define <vscale x 4 x i32>  @intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v18, v19
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -407,7 +407,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16(
 define <vscale x 4 x i32>  @intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v18, v19, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -430,7 +430,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.nxv8i16(
 define <vscale x 8 x i32>  @intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v20, v22
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -453,7 +453,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16(
 define <vscale x 8 x i32>  @intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v20, v22, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -479,7 +479,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmacc_vv_nxv16i32_nxv16i16_nxv16i16(<vsc
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -505,7 +505,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmacc_mask_vv_nxv16i32_nxv16i16_nxv16i16
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v8, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -528,7 +528,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.i8(
 define <vscale x 1 x i16>  @intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -551,7 +551,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.i8(
 define <vscale x 1 x i16> @intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -574,7 +574,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.i8(
 define <vscale x 2 x i16>  @intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -597,7 +597,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.i8(
 define <vscale x 2 x i16> @intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -620,7 +620,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.i8(
 define <vscale x 4 x i16>  @intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -643,7 +643,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.i8(
 define <vscale x 4 x i16> @intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -666,7 +666,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.i8(
 define <vscale x 8 x i16>  @intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -689,7 +689,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.i8(
 define <vscale x 8 x i16> @intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -712,7 +712,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.i8(
 define <vscale x 16 x i16>  @intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -735,7 +735,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.i8(
 define <vscale x 16 x i16> @intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -760,7 +760,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmacc_vx_nxv32i16_i8_nxv32i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -785,7 +785,7 @@ define <vscale x 32 x i16> @intrinsic_vwmacc_mask_vx_nxv32i16_i8_nxv32i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -808,7 +808,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.i16(
 define <vscale x 1 x i32>  @intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -831,7 +831,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.i16(
 define <vscale x 1 x i32> @intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -854,7 +854,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.i16(
 define <vscale x 2 x i32>  @intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -877,7 +877,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.i16(
 define <vscale x 2 x i32> @intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -900,7 +900,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16(
 define <vscale x 4 x i32>  @intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -923,7 +923,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.i16(
 define <vscale x 4 x i32> @intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -946,7 +946,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.i16(
 define <vscale x 8 x i32>  @intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -969,7 +969,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.i16(
 define <vscale x 8 x i32> @intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -994,7 +994,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmacc_vx_nxv16i32_i16_nxv16i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1019,7 +1019,7 @@ define <vscale x 16 x i32> @intrinsic_vwmacc_mask_vx_nxv16i32_i16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

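Every hunk in the vwmacc test diffs above makes the same single-field change: the tail-policy operand of the governing vsetvli flips from ta (tail agnostic) to tu (tail undisturbed), because the instruction's destination register (v16 in the CHECK lines) is also its accumulator input. A minimal sketch of the test pattern follows; the declare argument lists are elided in the hunk headers, so the reconstructed signature below is an assumption inferred from the visible define signatures, not copied from the source file.

; Sketch only: intrinsic signature inferred from the defines above.
declare <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,  ; accumulator, tied to the destination register
  <vscale x 1 x i8>,   ; multiplicand
  <vscale x 1 x i8>,   ; multiplier
  i32)                 ; vl (i64 in the rv64 file that follows)

define <vscale x 1 x i16> @sketch_vwmacc(<vscale x 1 x i16> %acc, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, i32 %vl) nounwind {
entry:
  ; Expected codegen after this patch: "vsetvli a0, a0, e8,mf8,tu,mu"
  ; followed by "vwmacc.vv v16, v17, v18" -- tail undisturbed, so the
  ; tail elements of the result are carried over from %acc.
  %r = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %acc,
    <vscale x 1 x i8> %a,
    <vscale x 1 x i8> %b,
    i32 %vl)
  ret <vscale x 1 x i16> %r
}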
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll
index a6ed911b7736..37b4cc55529a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
 define <vscale x 1 x i16>  @intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
 define <vscale x 1 x i16>  @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8(
 define <vscale x 2 x i16>  @intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8(
 define <vscale x 2 x i16>  @intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8(
 define <vscale x 4 x i16>  @intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8(
 define <vscale x 4 x i16>  @intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8(
 define <vscale x 8 x i16>  @intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v18, v19
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8(
 define <vscale x 8 x i16>  @intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v18, v19, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8(
 define <vscale x 16 x i16>  @intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v20, v22
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8(
 define <vscale x 16 x i16>  @intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v20, v22, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -243,7 +243,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmacc_vv_nxv32i16_nxv32i8_nxv32i8(<vscal
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -269,7 +269,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmacc_mask_vv_nxv32i16_nxv32i8_nxv32i8(<
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v8, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -292,7 +292,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.nxv1i16(
 define <vscale x 1 x i32>  @intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -315,7 +315,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16(
 define <vscale x 1 x i32>  @intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -338,7 +338,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.nxv2i16(
 define <vscale x 2 x i32>  @intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -361,7 +361,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16(
 define <vscale x 2 x i32>  @intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -384,7 +384,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16(
 define <vscale x 4 x i32>  @intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v18, v19
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -407,7 +407,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16(
 define <vscale x 4 x i32>  @intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v18, v19, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -430,7 +430,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.nxv8i16(
 define <vscale x 8 x i32>  @intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v20, v22
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -453,7 +453,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16(
 define <vscale x 8 x i32>  @intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v20, v22, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -479,7 +479,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmacc_vv_nxv16i32_nxv16i16_nxv16i16(<vsc
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -505,7 +505,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmacc_mask_vv_nxv16i32_nxv16i16_nxv16i16
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v8, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -528,7 +528,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32(
 define <vscale x 1 x i64>  @intrinsic_vwmacc_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i64_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -551,7 +551,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32(
 define <vscale x 1 x i64>  @intrinsic_vwmacc_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i64_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -574,7 +574,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.nxv2i32(
 define <vscale x 2 x i64>  @intrinsic_vwmacc_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i64_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v18, v19
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -597,7 +597,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32(
 define <vscale x 2 x i64>  @intrinsic_vwmacc_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i64_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v18, v19, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -620,7 +620,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.nxv4i32(
 define <vscale x 4 x i64>  @intrinsic_vwmacc_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i64_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v20, v22
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -643,7 +643,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32(
 define <vscale x 4 x i64>  @intrinsic_vwmacc_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i64_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v20, v22, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -669,7 +669,7 @@ define <vscale x 8 x i64>  @intrinsic_vwmacc_vv_nxv8i64_nxv8i32_nxv8i32(<vscale
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -695,7 +695,7 @@ define <vscale x 8 x i64>  @intrinsic_vwmacc_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vs
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v16, v8, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -718,7 +718,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.i8(
 define <vscale x 1 x i16>  @intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -741,7 +741,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.i8(
 define <vscale x 1 x i16> @intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -764,7 +764,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.i8(
 define <vscale x 2 x i16>  @intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -787,7 +787,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.i8(
 define <vscale x 2 x i16> @intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -810,7 +810,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.i8(
 define <vscale x 4 x i16>  @intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -833,7 +833,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.i8(
 define <vscale x 4 x i16> @intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -856,7 +856,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.i8(
 define <vscale x 8 x i16>  @intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -879,7 +879,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.i8(
 define <vscale x 8 x i16> @intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -902,7 +902,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.i8(
 define <vscale x 16 x i16>  @intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -925,7 +925,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.i8(
 define <vscale x 16 x i16> @intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -950,7 +950,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmacc_vx_nxv32i16_i8_nxv32i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -975,7 +975,7 @@ define <vscale x 32 x i16> @intrinsic_vwmacc_mask_vx_nxv32i16_i8_nxv32i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -998,7 +998,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.i16(
 define <vscale x 1 x i32>  @intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1021,7 +1021,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.i16(
 define <vscale x 1 x i32> @intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1044,7 +1044,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.i16(
 define <vscale x 2 x i32>  @intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1067,7 +1067,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.i16(
 define <vscale x 2 x i32> @intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1090,7 +1090,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16(
 define <vscale x 4 x i32>  @intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1113,7 +1113,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.i16(
 define <vscale x 4 x i32> @intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1136,7 +1136,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.i16(
 define <vscale x 8 x i32>  @intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1159,7 +1159,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.i16(
 define <vscale x 8 x i32> @intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1184,7 +1184,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmacc_vx_nxv16i32_i16_nxv16i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1209,7 +1209,7 @@ define <vscale x 16 x i32> @intrinsic_vwmacc_mask_vx_nxv16i32_i16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1232,7 +1232,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.i32(
 define <vscale x 1 x i64>  @intrinsic_vwmacc_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i64_i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1255,7 +1255,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.i32(
 define <vscale x 1 x i64> @intrinsic_vwmacc_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i64_i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1278,7 +1278,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.i32(
 define <vscale x 2 x i64>  @intrinsic_vwmacc_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i64_i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1301,7 +1301,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.i32(
 define <vscale x 2 x i64> @intrinsic_vwmacc_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i64_i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1324,7 +1324,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.i32(
 define <vscale x 4 x i64>  @intrinsic_vwmacc_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i64_i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1347,7 +1347,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.i32(
 define <vscale x 4 x i64> @intrinsic_vwmacc_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i64_i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1372,7 +1372,7 @@ define <vscale x 8 x i64>  @intrinsic_vwmacc_vx_nxv8i64_i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1397,7 +1397,7 @@ define <vscale x 8 x i64> @intrinsic_vwmacc_mask_vx_nxv8i64_i32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v16, a0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

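The _mask_ variants above change in lockstep with the unmasked ones: with tu,mu selected, both the tail elements and the inactive body elements of the result are carried over from the tied accumulator. As before, this is a sketch; the operand order (accumulator, op1, op2, mask, vl) is inferred from the visible define signatures rather than copied from the source file.

; Sketch only: masked-intrinsic signature inferred from the defines above.
declare <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,  ; accumulator, tied to the destination register
  <vscale x 1 x i8>,   ; multiplicand
  <vscale x 1 x i8>,   ; multiplier
  <vscale x 1 x i1>,   ; mask, materialized in v0 for the v0.t encoding
  i64)                 ; vl

define <vscale x 1 x i16> @sketch_vwmacc_mask(<vscale x 1 x i16> %acc, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i64 %vl) nounwind {
entry:
  ; Expected codegen: "vsetvli a0, a0, e8,mf8,tu,mu" followed by
  ; "vwmacc.vv v16, v17, v18, v0.t".
  %r = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %acc,
    <vscale x 1 x i8> %a,
    <vscale x 1 x i8> %b,
    <vscale x 1 x i1> %m,
    i64 %vl)
  ret <vscale x 1 x i16> %r
}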
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll
index 2d39ba95db2e..701e7f73248f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(
 define <vscale x 1 x i16>  @intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8(
 define <vscale x 1 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8(
 define <vscale x 2 x i16>  @intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8(
 define <vscale x 2 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8(
 define <vscale x 4 x i16>  @intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8(
 define <vscale x 4 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8(
 define <vscale x 8 x i16>  @intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v18, v19
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8(
 define <vscale x 8 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v18, v19, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8(
 define <vscale x 16 x i16>  @intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v20, v22
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8(
 define <vscale x 16 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v20, v22, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -243,7 +243,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmaccsu_vv_nxv32i16_nxv32i8_nxv32i8(<vsc
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -269,7 +269,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv32i16_nxv32i8_nxv32i8
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v8, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -292,7 +292,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16(
 define <vscale x 1 x i32>  @intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -315,7 +315,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16(
 define <vscale x 1 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -338,7 +338,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16(
 define <vscale x 2 x i32>  @intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -361,7 +361,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16(
 define <vscale x 2 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -384,7 +384,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16(
 define <vscale x 4 x i32>  @intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v18, v19
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -407,7 +407,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16(
 define <vscale x 4 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v18, v19, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -430,7 +430,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16(
 define <vscale x 8 x i32>  @intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v20, v22
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -453,7 +453,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16(
 define <vscale x 8 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v20, v22, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -479,7 +479,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmaccsu_vv_nxv16i32_nxv16i16_nxv16i16(<v
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -505,7 +505,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv16i32_nxv16i16_nxv16i
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v8, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -528,7 +528,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8(
 define <vscale x 1 x i16>  @intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -551,7 +551,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8(
 define <vscale x 1 x i16> @intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -574,7 +574,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8(
 define <vscale x 2 x i16>  @intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -597,7 +597,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8(
 define <vscale x 2 x i16> @intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -620,7 +620,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8(
 define <vscale x 4 x i16>  @intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -643,7 +643,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8(
 define <vscale x 4 x i16> @intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -666,7 +666,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8(
 define <vscale x 8 x i16>  @intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -689,7 +689,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8(
 define <vscale x 8 x i16> @intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -712,7 +712,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8(
 define <vscale x 16 x i16>  @intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -735,7 +735,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8(
 define <vscale x 16 x i16> @intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -760,7 +760,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmaccsu_vx_nxv32i16_i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -785,7 +785,7 @@ define <vscale x 32 x i16> @intrinsic_vwmaccsu_mask_vx_nxv32i16_i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -808,7 +808,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16(
 define <vscale x 1 x i32>  @intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -831,7 +831,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16(
 define <vscale x 1 x i32> @intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -854,7 +854,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16(
 define <vscale x 2 x i32>  @intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -877,7 +877,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16(
 define <vscale x 2 x i32> @intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -900,7 +900,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16(
 define <vscale x 4 x i32>  @intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -923,7 +923,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16(
 define <vscale x 4 x i32> @intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -946,7 +946,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16(
 define <vscale x 8 x i32>  @intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -969,7 +969,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16(
 define <vscale x 8 x i32> @intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -994,7 +994,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmaccsu_vx_nxv16i32_i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1019,7 +1019,7 @@ define <vscale x 16 x i32> @intrinsic_vwmaccsu_mask_vx_nxv16i32_i16_nxv16i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll
index c274246b7b53..4e7a1564717b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(
 define <vscale x 1 x i16>  @intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8(
 define <vscale x 1 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8(
 define <vscale x 2 x i16>  @intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8(
 define <vscale x 2 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8(
 define <vscale x 4 x i16>  @intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8(
 define <vscale x 4 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8(
 define <vscale x 8 x i16>  @intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v18, v19
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8(
 define <vscale x 8 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v18, v19, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8(
 define <vscale x 16 x i16>  @intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v20, v22
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8(
 define <vscale x 16 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v20, v22, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -243,7 +243,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmaccsu_vv_nxv32i16_nxv32i8_nxv32i8(<vsc
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -269,7 +269,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv32i16_nxv32i8_nxv32i8
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v8, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -292,7 +292,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16(
 define <vscale x 1 x i32>  @intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -315,7 +315,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16(
 define <vscale x 1 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -338,7 +338,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16(
 define <vscale x 2 x i32>  @intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -361,7 +361,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16(
 define <vscale x 2 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -384,7 +384,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16(
 define <vscale x 4 x i32>  @intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v18, v19
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -407,7 +407,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16(
 define <vscale x 4 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v18, v19, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -430,7 +430,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16(
 define <vscale x 8 x i32>  @intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v20, v22
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -453,7 +453,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16(
 define <vscale x 8 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v20, v22, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -479,7 +479,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmaccsu_vv_nxv16i32_nxv16i16_nxv16i16(<v
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -505,7 +505,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv16i32_nxv16i16_nxv16i
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v8, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -528,7 +528,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32(
 define <vscale x 1 x i64>  @intrinsic_vwmaccsu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i64_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -551,7 +551,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32(
 define <vscale x 1 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i64_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -574,7 +574,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32(
 define <vscale x 2 x i64>  @intrinsic_vwmaccsu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i64_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v18, v19
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -597,7 +597,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32(
 define <vscale x 2 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i64_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v18, v19, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -620,7 +620,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32(
 define <vscale x 4 x i64>  @intrinsic_vwmaccsu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i64_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v20, v22
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -643,7 +643,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32(
 define <vscale x 4 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i64_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v20, v22, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -669,7 +669,7 @@ define <vscale x 8 x i64>  @intrinsic_vwmaccsu_vv_nxv8i64_nxv8i32_nxv8i32(<vscal
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -695,7 +695,7 @@ define <vscale x 8 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v16, v8, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -718,7 +718,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8(
 define <vscale x 1 x i16>  @intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -741,7 +741,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8(
 define <vscale x 1 x i16> @intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -764,7 +764,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8(
 define <vscale x 2 x i16>  @intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -787,7 +787,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8(
 define <vscale x 2 x i16> @intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -810,7 +810,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8(
 define <vscale x 4 x i16>  @intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -833,7 +833,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8(
 define <vscale x 4 x i16> @intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -856,7 +856,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8(
 define <vscale x 8 x i16>  @intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -879,7 +879,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8(
 define <vscale x 8 x i16> @intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -902,7 +902,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8(
 define <vscale x 16 x i16>  @intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -925,7 +925,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8(
 define <vscale x 16 x i16> @intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -950,7 +950,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmaccsu_vx_nxv32i16_i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -975,7 +975,7 @@ define <vscale x 32 x i16> @intrinsic_vwmaccsu_mask_vx_nxv32i16_i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -998,7 +998,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16(
 define <vscale x 1 x i32>  @intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1021,7 +1021,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16(
 define <vscale x 1 x i32> @intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1044,7 +1044,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16(
 define <vscale x 2 x i32>  @intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1067,7 +1067,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16(
 define <vscale x 2 x i32> @intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1090,7 +1090,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16(
 define <vscale x 4 x i32>  @intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1113,7 +1113,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16(
 define <vscale x 4 x i32> @intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1136,7 +1136,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16(
 define <vscale x 8 x i32>  @intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1159,7 +1159,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16(
 define <vscale x 8 x i32> @intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1184,7 +1184,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmaccsu_vx_nxv16i32_i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1209,7 +1209,7 @@ define <vscale x 16 x i32> @intrinsic_vwmaccsu_mask_vx_nxv16i32_i16_nxv16i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1232,7 +1232,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.i32(
 define <vscale x 1 x i64>  @intrinsic_vwmaccsu_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i64_i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1255,7 +1255,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.i32(
 define <vscale x 1 x i64> @intrinsic_vwmaccsu_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i64_i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1278,7 +1278,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.i32(
 define <vscale x 2 x i64>  @intrinsic_vwmaccsu_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i64_i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1301,7 +1301,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.i32(
 define <vscale x 2 x i64> @intrinsic_vwmaccsu_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i64_i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1324,7 +1324,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.i32(
 define <vscale x 4 x i64>  @intrinsic_vwmaccsu_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i64_i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1347,7 +1347,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.i32(
 define <vscale x 4 x i64> @intrinsic_vwmaccsu_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i64_i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1372,7 +1372,7 @@ define <vscale x 8 x i64>  @intrinsic_vwmaccsu_vx_nxv8i64_i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1397,7 +1397,7 @@ define <vscale x 8 x i64> @intrinsic_vwmaccsu_mask_vx_nxv8i64_i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v16, a0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll
index 2bc594e82d6b..bad8860e5856 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
 define <vscale x 1 x i16>  @intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8(
 define <vscale x 1 x i16>  @intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8(
 define <vscale x 2 x i16>  @intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8(
 define <vscale x 2 x i16>  @intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8(
 define <vscale x 4 x i16>  @intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8(
 define <vscale x 4 x i16>  @intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8(
 define <vscale x 8 x i16>  @intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v18, v19
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8(
 define <vscale x 8 x i16>  @intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v18, v19, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8(
 define <vscale x 16 x i16>  @intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v20, v22
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8(
 define <vscale x 16 x i16>  @intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v20, v22, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -243,7 +243,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8(<vsca
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -269,7 +269,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmaccu_mask_vv_nxv32i16_nxv32i8_nxv32i8(
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v8, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -292,7 +292,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16(
 define <vscale x 1 x i32>  @intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -315,7 +315,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16(
 define <vscale x 1 x i32>  @intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -338,7 +338,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16(
 define <vscale x 2 x i32>  @intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -361,7 +361,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16(
 define <vscale x 2 x i32>  @intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -384,7 +384,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16(
 define <vscale x 4 x i32>  @intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v18, v19
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -407,7 +407,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16(
 define <vscale x 4 x i32>  @intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v18, v19, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -430,7 +430,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16(
 define <vscale x 8 x i32>  @intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v20, v22
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -453,7 +453,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16(
 define <vscale x 8 x i32>  @intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v20, v22, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -479,7 +479,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16(<vs
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -505,7 +505,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmaccu_mask_vv_nxv16i32_nxv16i16_nxv16i1
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v8, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -528,7 +528,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8(
 define <vscale x 1 x i16>  @intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -551,7 +551,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8(
 define <vscale x 1 x i16> @intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -574,7 +574,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8(
 define <vscale x 2 x i16>  @intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -597,7 +597,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8(
 define <vscale x 2 x i16> @intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -620,7 +620,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8(
 define <vscale x 4 x i16>  @intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -643,7 +643,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8(
 define <vscale x 4 x i16> @intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -666,7 +666,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8(
 define <vscale x 8 x i16>  @intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -689,7 +689,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8(
 define <vscale x 8 x i16> @intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -712,7 +712,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8(
 define <vscale x 16 x i16>  @intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -735,7 +735,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8(
 define <vscale x 16 x i16> @intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -760,7 +760,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -785,7 +785,7 @@ define <vscale x 32 x i16> @intrinsic_vwmaccu_mask_vx_nxv32i16_i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -808,7 +808,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16(
 define <vscale x 1 x i32>  @intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -831,7 +831,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16(
 define <vscale x 1 x i32> @intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -854,7 +854,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16(
 define <vscale x 2 x i32>  @intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -877,7 +877,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16(
 define <vscale x 2 x i32> @intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -900,7 +900,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16(
 define <vscale x 4 x i32>  @intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -923,7 +923,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16(
 define <vscale x 4 x i32> @intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -946,7 +946,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16(
 define <vscale x 8 x i32>  @intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -969,7 +969,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16(
 define <vscale x 8 x i32> @intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -994,7 +994,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1019,7 +1019,7 @@ define <vscale x 16 x i32> @intrinsic_vwmaccu_mask_vx_nxv16i32_i16_nxv16i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll
index be5d1779a22c..9d7289ee398c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
 define <vscale x 1 x i16>  @intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8(
 define <vscale x 1 x i16>  @intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8(
 define <vscale x 2 x i16>  @intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8(
 define <vscale x 2 x i16>  @intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8(
 define <vscale x 4 x i16>  @intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8(
 define <vscale x 4 x i16>  @intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8(
 define <vscale x 8 x i16>  @intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v18, v19
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8(
 define <vscale x 8 x i16>  @intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v18, v19, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8(
 define <vscale x 16 x i16>  @intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v20, v22
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8(
 define <vscale x 16 x i16>  @intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v20, v22, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -243,7 +243,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8(<vsca
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -269,7 +269,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmaccu_mask_vv_nxv32i16_nxv32i8_nxv32i8(
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v8, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -292,7 +292,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16(
 define <vscale x 1 x i32>  @intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -315,7 +315,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16(
 define <vscale x 1 x i32>  @intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -338,7 +338,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16(
 define <vscale x 2 x i32>  @intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -361,7 +361,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16(
 define <vscale x 2 x i32>  @intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -384,7 +384,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16(
 define <vscale x 4 x i32>  @intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v18, v19
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -407,7 +407,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16(
 define <vscale x 4 x i32>  @intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v18, v19, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -430,7 +430,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16(
 define <vscale x 8 x i32>  @intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v20, v22
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -453,7 +453,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16(
 define <vscale x 8 x i32>  @intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v20, v22, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -479,7 +479,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16(<vs
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -505,7 +505,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmaccu_mask_vv_nxv16i32_nxv16i16_nxv16i1
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v8, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -528,7 +528,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(
 define <vscale x 1 x i64>  @intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v17, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -551,7 +551,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(
 define <vscale x 1 x i64>  @intrinsic_vwmaccu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i64_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v17, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -574,7 +574,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32(
 define <vscale x 2 x i64>  @intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v18, v19
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -597,7 +597,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32(
 define <vscale x 2 x i64>  @intrinsic_vwmaccu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i64_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v18, v19, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -620,7 +620,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32(
 define <vscale x 4 x i64>  @intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v20, v22
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -643,7 +643,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32(
 define <vscale x 4 x i64>  @intrinsic_vwmaccu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i64_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v20, v22, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -669,7 +669,7 @@ define <vscale x 8 x i64>  @intrinsic_vwmaccu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v8, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -695,7 +695,7 @@ define <vscale x 8 x i64>  @intrinsic_vwmaccu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<v
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v16, v8, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -718,7 +718,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8(
 define <vscale x 1 x i16>  @intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -741,7 +741,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8(
 define <vscale x 1 x i16> @intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -764,7 +764,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8(
 define <vscale x 2 x i16>  @intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -787,7 +787,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8(
 define <vscale x 2 x i16> @intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -810,7 +810,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8(
 define <vscale x 4 x i16>  @intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -833,7 +833,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8(
 define <vscale x 4 x i16> @intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -856,7 +856,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8(
 define <vscale x 8 x i16>  @intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -879,7 +879,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8(
 define <vscale x 8 x i16> @intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -902,7 +902,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8(
 define <vscale x 16 x i16>  @intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -925,7 +925,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8(
 define <vscale x 16 x i16> @intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -950,7 +950,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -975,7 +975,7 @@ define <vscale x 32 x i16> @intrinsic_vwmaccu_mask_vx_nxv32i16_i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -998,7 +998,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16(
 define <vscale x 1 x i32>  @intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1021,7 +1021,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16(
 define <vscale x 1 x i32> @intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1044,7 +1044,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16(
 define <vscale x 2 x i32>  @intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1067,7 +1067,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16(
 define <vscale x 2 x i32> @intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1090,7 +1090,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16(
 define <vscale x 4 x i32>  @intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1113,7 +1113,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16(
 define <vscale x 4 x i32> @intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1136,7 +1136,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16(
 define <vscale x 8 x i32>  @intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1159,7 +1159,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16(
 define <vscale x 8 x i32> @intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1184,7 +1184,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1209,7 +1209,7 @@ define <vscale x 16 x i32> @intrinsic_vwmaccu_mask_vx_nxv16i32_i16_nxv16i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1232,7 +1232,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32(
 define <vscale x 1 x i64>  @intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1255,7 +1255,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32(
 define <vscale x 1 x i64> @intrinsic_vwmaccu_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i64_i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1278,7 +1278,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32(
 define <vscale x 2 x i64>  @intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1301,7 +1301,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32(
 define <vscale x 2 x i64> @intrinsic_vwmaccu_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i64_i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1324,7 +1324,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32(
 define <vscale x 4 x i64>  @intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1347,7 +1347,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32(
 define <vscale x 4 x i64> @intrinsic_vwmaccu_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i64_i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1372,7 +1372,7 @@ define <vscale x 8 x i64>  @intrinsic_vwmaccu_vx_nxv8i64_i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -1397,7 +1397,7 @@ define <vscale x 8 x i64> @intrinsic_vwmaccu_mask_vx_nxv8i64_i32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v16, a0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll
index b9e0207f381a..30fbfc3dafaa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8(
 define <vscale x 1 x i16>  @intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8(
 define <vscale x 1 x i16> @intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8(
 define <vscale x 2 x i16>  @intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8(
 define <vscale x 2 x i16> @intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8(
 define <vscale x 4 x i16>  @intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8(
 define <vscale x 4 x i16> @intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8(
 define <vscale x 8 x i16>  @intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8(
 define <vscale x 8 x i16> @intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8(
 define <vscale x 16 x i16>  @intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8(
 define <vscale x 16 x i16> @intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -242,7 +242,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmaccus_vx_nxv32i16_i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -267,7 +267,7 @@ define <vscale x 32 x i16> @intrinsic_vwmaccus_mask_vx_nxv32i16_i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -290,7 +290,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16(
 define <vscale x 1 x i32>  @intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -313,7 +313,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16(
 define <vscale x 1 x i32> @intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -336,7 +336,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16(
 define <vscale x 2 x i32>  @intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -359,7 +359,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16(
 define <vscale x 2 x i32> @intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -382,7 +382,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16(
 define <vscale x 4 x i32>  @intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -405,7 +405,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16(
 define <vscale x 4 x i32> @intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -428,7 +428,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16(
 define <vscale x 8 x i32>  @intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -451,7 +451,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16(
 define <vscale x 8 x i32> @intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -476,7 +476,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmaccus_vx_nxv16i32_i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -501,7 +501,7 @@ define <vscale x 16 x i32> @intrinsic_vwmaccus_mask_vx_nxv16i32_i16_nxv16i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll
index 56964b8819d7..f5fe3a51d289 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8(
 define <vscale x 1 x i16>  @intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8(
 define <vscale x 1 x i16> @intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8(
 define <vscale x 2 x i16>  @intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8(
 define <vscale x 2 x i16> @intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8(
 define <vscale x 4 x i16>  @intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8(
 define <vscale x 4 x i16> @intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8(
 define <vscale x 8 x i16>  @intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8(
 define <vscale x 8 x i16> @intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8(
 define <vscale x 16 x i16>  @intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8(
 define <vscale x 16 x i16> @intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -242,7 +242,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmaccus_vx_nxv32i16_i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -267,7 +267,7 @@ define <vscale x 32 x i16> @intrinsic_vwmaccus_mask_vx_nxv32i16_i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -290,7 +290,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16(
 define <vscale x 1 x i32>  @intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -313,7 +313,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16(
 define <vscale x 1 x i32> @intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -336,7 +336,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16(
 define <vscale x 2 x i32>  @intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -359,7 +359,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16(
 define <vscale x 2 x i32> @intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -382,7 +382,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16(
 define <vscale x 4 x i32>  @intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -405,7 +405,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16(
 define <vscale x 4 x i32> @intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -428,7 +428,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16(
 define <vscale x 8 x i32>  @intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -451,7 +451,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16(
 define <vscale x 8 x i32> @intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -476,7 +476,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmaccus_vx_nxv16i32_i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -501,7 +501,7 @@ define <vscale x 16 x i32> @intrinsic_vwmaccus_mask_vx_nxv16i32_i16_nxv16i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -524,7 +524,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32(
 define <vscale x 1 x i64>  @intrinsic_vwmaccus_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i64_i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v17
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -547,7 +547,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32(
 define <vscale x 1 x i64> @intrinsic_vwmaccus_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i64_i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -570,7 +570,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32(
 define <vscale x 2 x i64>  @intrinsic_vwmaccus_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i64_i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v18
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -593,7 +593,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32(
 define <vscale x 2 x i64> @intrinsic_vwmaccus_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i64_i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -616,7 +616,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32(
 define <vscale x 4 x i64>  @intrinsic_vwmaccus_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i64_i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v20
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -639,7 +639,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32(
 define <vscale x 4 x i64> @intrinsic_vwmaccus_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i64_i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -664,7 +664,7 @@ define <vscale x 8 x i64>  @intrinsic_vwmaccus_vx_nxv8i64_i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v28
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -689,7 +689,7 @@ define <vscale x 8 x i64> @intrinsic_vwmaccus_mask_vx_nxv8i64_i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v16, a0, v28, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll
index 57f516d9ef15..7aaaf2324d87 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
 define <vscale x 1 x i16> @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8(
 define <vscale x 2 x i16> @intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8(
 define <vscale x 8 x i16> @intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8(
 define <vscale x 16 x i16> @intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16(
 define <vscale x 1 x i32> @intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16(
 define <vscale x 4 x i32> @intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16(
 define <vscale x 8 x i32> @intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16(
 define <vscale x 16 x i32> @intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8(
 define <vscale x 1 x i16> @intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8(
 define <vscale x 2 x i16> @intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8(
 define <vscale x 4 x i16> @intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8(
 define <vscale x 8 x i16> @intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -628,7 +628,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8(
 define <vscale x 16 x i16> @intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -668,7 +668,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8(
 define <vscale x 32 x i16> @intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -708,7 +708,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16(
 define <vscale x 1 x i32> @intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16(
 define <vscale x 2 x i32> @intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -788,7 +788,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16(
 define <vscale x 4 x i32> @intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -828,7 +828,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16(
 define <vscale x 8 x i32> @intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -868,7 +868,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16(
 define <vscale x 16 x i32> @intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,

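All of the masked vwmul hunks above follow one shape: the first operand of the
intrinsic call is the value tied to the destination register, and only the
trailing policy field of the vsetvli changes from ta to tu. For reference, a
minimal self-contained sketch of one such test, assembled from the declaration
and call arguments already shown above (the function name @sketch and the
value names are illustrative, not from the patch):

declare <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  i32)

define <vscale x 1 x i16> @sketch(<vscale x 1 x i16> %merge, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %mask, i32 %vl) nounwind {
entry:
  ; %merge occupies the destination register, so its tail elements are
  ; observable by the caller; the expected lowering now requests
  ; e8,mf8,tu,mu rather than e8,mf8,ta,mu before the masked vwmul.vv.
  %r = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i16> %merge,
    <vscale x 1 x i8> %a,
    <vscale x 1 x i8> %b,
    <vscale x 1 x i1> %mask,
    i32 %vl)
  ret <vscale x 1 x i16> %r
}
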
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll
index d361814644cc..8c5c8c968cb3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
 define <vscale x 1 x i16> @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8(
 define <vscale x 2 x i16> @intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8(
 define <vscale x 8 x i16> @intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8(
 define <vscale x 16 x i16> @intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16(
 define <vscale x 1 x i32> @intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16(
 define <vscale x 4 x i32> @intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16(
 define <vscale x 8 x i32> @intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16(
 define <vscale x 16 x i32> @intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32(
 define <vscale x 1 x i64> @intrinsic_vwmul_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i64_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32(
 define <vscale x 2 x i64> @intrinsic_vwmul_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i64_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32(
 define <vscale x 4 x i64> @intrinsic_vwmul_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i64_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32(
 define <vscale x 8 x i64> @intrinsic_vwmul_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i64_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -628,7 +628,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8(
 define <vscale x 1 x i16> @intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -668,7 +668,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8(
 define <vscale x 2 x i16> @intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -708,7 +708,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8(
 define <vscale x 4 x i16> @intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -748,7 +748,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8(
 define <vscale x 8 x i16> @intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -788,7 +788,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8(
 define <vscale x 16 x i16> @intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -828,7 +828,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8(
 define <vscale x 32 x i16> @intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -868,7 +868,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16(
 define <vscale x 1 x i32> @intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -908,7 +908,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16(
 define <vscale x 2 x i32> @intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -948,7 +948,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16(
 define <vscale x 4 x i32> @intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -988,7 +988,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16(
 define <vscale x 8 x i32> @intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16(
 define <vscale x 16 x i32> @intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32(
 define <vscale x 1 x i64> @intrinsic_vwmul_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i64_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32(
 define <vscale x 2 x i64> @intrinsic_vwmul_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i64_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32(
 define <vscale x 4 x i64> @intrinsic_vwmul_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i64_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32(
 define <vscale x 8 x i64> @intrinsic_vwmul_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i64_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll
index 34c9be773157..40c35c56e71f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8(
 define <vscale x 1 x i16> @intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8(
 define <vscale x 2 x i16> @intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8(
 define <vscale x 8 x i16> @intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8(
 define <vscale x 16 x i16> @intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16(
 define <vscale x 1 x i32> @intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16(
 define <vscale x 4 x i32> @intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16(
 define <vscale x 8 x i32> @intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16(
 define <vscale x 16 x i32> @intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8(
 define <vscale x 1 x i16> @intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8(
 define <vscale x 2 x i16> @intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8(
 define <vscale x 4 x i16> @intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8(
 define <vscale x 8 x i16> @intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -628,7 +628,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8(
 define <vscale x 16 x i16> @intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -668,7 +668,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8(
 define <vscale x 32 x i16> @intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -708,7 +708,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16(
 define <vscale x 1 x i32> @intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16(
 define <vscale x 2 x i32> @intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -788,7 +788,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16(
 define <vscale x 4 x i32> @intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -828,7 +828,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16(
 define <vscale x 8 x i32> @intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -868,7 +868,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16(
 define <vscale x 16 x i32> @intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll
index 089f4af5512c..9894ac7a2a2c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8(
 define <vscale x 1 x i16> @intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8(
 define <vscale x 2 x i16> @intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8(
 define <vscale x 8 x i16> @intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8(
 define <vscale x 16 x i16> @intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16(
 define <vscale x 1 x i32> @intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16(
 define <vscale x 4 x i32> @intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16(
 define <vscale x 8 x i32> @intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16(
 define <vscale x 16 x i32> @intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32(
 define <vscale x 1 x i64> @intrinsic_vwmulsu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i64_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32(
 define <vscale x 2 x i64> @intrinsic_vwmulsu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i64_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32(
 define <vscale x 4 x i64> @intrinsic_vwmulsu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i64_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32(
 define <vscale x 8 x i64> @intrinsic_vwmulsu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i64_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -628,7 +628,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8(
 define <vscale x 1 x i16> @intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -668,7 +668,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8(
 define <vscale x 2 x i16> @intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -708,7 +708,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8(
 define <vscale x 4 x i16> @intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -748,7 +748,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8(
 define <vscale x 8 x i16> @intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -788,7 +788,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8(
 define <vscale x 16 x i16> @intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -828,7 +828,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8(
 define <vscale x 32 x i16> @intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -868,7 +868,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16(
 define <vscale x 1 x i32> @intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -908,7 +908,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16(
 define <vscale x 2 x i32> @intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -948,7 +948,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16(
 define <vscale x 4 x i32> @intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -988,7 +988,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16(
 define <vscale x 8 x i32> @intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16(
 define <vscale x 16 x i32> @intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32(
 define <vscale x 1 x i64> @intrinsic_vwmulsu_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i64_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32(
 define <vscale x 2 x i64> @intrinsic_vwmulsu_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i64_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32(
 define <vscale x 4 x i64> @intrinsic_vwmulsu_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i64_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32(
 define <vscale x 8 x i64> @intrinsic_vwmulsu_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i64_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll
index 3d8954401e8f..00de7fa1a99d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8(
 define <vscale x 1 x i16> @intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8(
 define <vscale x 2 x i16> @intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8(
 define <vscale x 8 x i16> @intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8(
 define <vscale x 16 x i16> @intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16(
 define <vscale x 1 x i32> @intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16(
 define <vscale x 4 x i32> @intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16(
 define <vscale x 8 x i32> @intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16(
 define <vscale x 16 x i32> @intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8(
 define <vscale x 1 x i16> @intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8(
 define <vscale x 2 x i16> @intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8(
 define <vscale x 4 x i16> @intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8(
 define <vscale x 8 x i16> @intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -628,7 +628,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8(
 define <vscale x 16 x i16> @intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -668,7 +668,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8(
 define <vscale x 32 x i16> @intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -708,7 +708,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16(
 define <vscale x 1 x i32> @intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16(
 define <vscale x 2 x i32> @intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -788,7 +788,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16(
 define <vscale x 4 x i32> @intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -828,7 +828,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16(
 define <vscale x 8 x i32> @intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -868,7 +868,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16(
 define <vscale x 16 x i32> @intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll
index 3d9bff312eb1..c73340d120f0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8(
 define <vscale x 1 x i16> @intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8(
 define <vscale x 2 x i16> @intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8(
 define <vscale x 8 x i16> @intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8(
 define <vscale x 16 x i16> @intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16(
 define <vscale x 1 x i32> @intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16(
 define <vscale x 4 x i32> @intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16(
 define <vscale x 8 x i32> @intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16(
 define <vscale x 16 x i32> @intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32(
 define <vscale x 1 x i64> @intrinsic_vwmulu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i64_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32(
 define <vscale x 2 x i64> @intrinsic_vwmulu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i64_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32(
 define <vscale x 4 x i64> @intrinsic_vwmulu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i64_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32(
 define <vscale x 8 x i64> @intrinsic_vwmulu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i64_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -628,7 +628,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8(
 define <vscale x 1 x i16> @intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -668,7 +668,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8(
 define <vscale x 2 x i16> @intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -708,7 +708,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8(
 define <vscale x 4 x i16> @intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -748,7 +748,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8(
 define <vscale x 8 x i16> @intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -788,7 +788,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8(
 define <vscale x 16 x i16> @intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -828,7 +828,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8(
 define <vscale x 32 x i16> @intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -868,7 +868,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16(
 define <vscale x 1 x i32> @intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -908,7 +908,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16(
 define <vscale x 2 x i32> @intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -948,7 +948,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16(
 define <vscale x 4 x i32> @intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -988,7 +988,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16(
 define <vscale x 8 x i32> @intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16(
 define <vscale x 16 x i32> @intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32(
 define <vscale x 1 x i64> @intrinsic_vwmulu_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i64_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32(
 define <vscale x 2 x i64> @intrinsic_vwmulu_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i64_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32(
 define <vscale x 4 x i64> @intrinsic_vwmulu_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i64_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32(
 define <vscale x 8 x i64> @intrinsic_vwmulu_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i64_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll
index f5884729b06f..d9748d4b359d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8(
 define <vscale x 1 x i16> @intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8(
 define <vscale x 2 x i16> @intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8(
 define <vscale x 8 x i16> @intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8(
 define <vscale x 16 x i16> @intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16(
 define <vscale x 1 x i32> @intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16(
 define <vscale x 4 x i32> @intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16(
 define <vscale x 8 x i32> @intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16(
 define <vscale x 16 x i32> @intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8(
 define <vscale x 1 x i16> @intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8(
 define <vscale x 2 x i16> @intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8(
 define <vscale x 4 x i16> @intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8(
 define <vscale x 8 x i16> @intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -628,7 +628,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8(
 define <vscale x 16 x i16> @intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -668,7 +668,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8(
 define <vscale x 32 x i16> @intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -708,7 +708,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16(
 define <vscale x 1 x i32> @intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16(
 define <vscale x 2 x i32> @intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -788,7 +788,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16(
 define <vscale x 4 x i32> @intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -828,7 +828,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16(
 define <vscale x 8 x i32> @intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -868,7 +868,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16(
 define <vscale x 16 x i32> @intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll
index 8ba2e2c14d84..b85fc02b2af2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8(
 define <vscale x 1 x i16> @intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8(
 define <vscale x 2 x i16> @intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8(
 define <vscale x 8 x i16> @intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8(
 define <vscale x 16 x i16> @intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16(
 define <vscale x 1 x i32> @intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16(
 define <vscale x 4 x i32> @intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16(
 define <vscale x 8 x i32> @intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16(
 define <vscale x 16 x i32> @intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32(
 define <vscale x 1 x i64> @intrinsic_vwsub_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i64_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32(
 define <vscale x 2 x i64> @intrinsic_vwsub_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i64_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32(
 define <vscale x 4 x i64> @intrinsic_vwsub_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i64_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32(
 define <vscale x 8 x i64> @intrinsic_vwsub_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i64_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -628,7 +628,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8(
 define <vscale x 1 x i16> @intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -668,7 +668,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8(
 define <vscale x 2 x i16> @intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -708,7 +708,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8(
 define <vscale x 4 x i16> @intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -748,7 +748,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8(
 define <vscale x 8 x i16> @intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -788,7 +788,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8(
 define <vscale x 16 x i16> @intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -828,7 +828,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8(
 define <vscale x 32 x i16> @intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -868,7 +868,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16(
 define <vscale x 1 x i32> @intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -908,7 +908,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16(
 define <vscale x 2 x i32> @intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -948,7 +948,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16(
 define <vscale x 4 x i32> @intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -988,7 +988,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16(
 define <vscale x 8 x i32> @intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16(
 define <vscale x 16 x i32> @intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32(
 define <vscale x 1 x i64> @intrinsic_vwsub_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i64_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32(
 define <vscale x 2 x i64> @intrinsic_vwsub_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i64_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32(
 define <vscale x 4 x i64> @intrinsic_vwsub_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i64_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32(
 define <vscale x 8 x i64> @intrinsic_vwsub_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i64_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll
index 47504572a489..404ca408adb7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8(
 define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8(
 define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8(
 define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8(
 define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16(
 define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16(
 define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16(
 define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16(
 define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.i8(
 define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.i8(
 define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.i8(
 define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.i8(
 define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -628,7 +628,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.i8(
 define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -668,7 +668,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.i8(
 define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -708,7 +708,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.i16(
 define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.i16(
 define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -788,7 +788,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.i16(
 define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -828,7 +828,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.i16(
 define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -868,7 +868,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.i16(
 define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
index 8eed85f92ad5..6c8c83b683d1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8(
 define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8(
 define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8(
 define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8(
 define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16(
 define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16(
 define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16(
 define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16(
 define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32(
 define <vscale x 1 x i64> @intrinsic_vwsub.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i64_nxv1i64_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32(
 define <vscale x 2 x i64> @intrinsic_vwsub.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i64_nxv2i64_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32(
 define <vscale x 4 x i64> @intrinsic_vwsub.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i64_nxv4i64_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32(
 define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -628,7 +628,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.i8(
 define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -668,7 +668,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.i8(
 define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -708,7 +708,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.i8(
 define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -748,7 +748,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.i8(
 define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -788,7 +788,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.i8(
 define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -828,7 +828,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.i8(
 define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -868,7 +868,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.i16(
 define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -908,7 +908,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.i16(
 define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -948,7 +948,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.i16(
 define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -988,7 +988,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.i16(
 define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.i16(
 define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.i32(
 define <vscale x 1 x i64> @intrinsic_vwsub.w_mask_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i64_nxv1i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.i32(
 define <vscale x 2 x i64> @intrinsic_vwsub.w_mask_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i64_nxv2i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.i32(
 define <vscale x 4 x i64> @intrinsic_vwsub.w_mask_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i64_nxv4i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.i32(
 define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i64_nxv8i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll
index 9ecf4e8dd3cc..e245b462a51d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8(
 define <vscale x 1 x i16> @intrinsic_vwsubu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i16_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8(
 define <vscale x 2 x i16> @intrinsic_vwsubu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i16_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vwsubu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i16_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8(
 define <vscale x 8 x i16> @intrinsic_vwsubu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i16_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8(
 define <vscale x 16 x i16> @intrinsic_vwsubu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv16i16_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vwsubu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv32i16_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16(
 define <vscale x 1 x i32> @intrinsic_vwsubu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i32_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vwsubu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i32_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16(
 define <vscale x 4 x i32> @intrinsic_vwsubu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i32_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16(
 define <vscale x 8 x i32> @intrinsic_vwsubu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i32_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16(
 define <vscale x 16 x i32> @intrinsic_vwsubu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv16i32_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8(
 define <vscale x 1 x i16> @intrinsic_vwsubu_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i16_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8(
 define <vscale x 2 x i16> @intrinsic_vwsubu_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i16_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8(
 define <vscale x 4 x i16> @intrinsic_vwsubu_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i16_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8(
 define <vscale x 8 x i16> @intrinsic_vwsubu_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i16_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -628,7 +628,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8(
 define <vscale x 16 x i16> @intrinsic_vwsubu_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv16i16_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -668,7 +668,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8(
 define <vscale x 32 x i16> @intrinsic_vwsubu_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv32i16_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -708,7 +708,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16(
 define <vscale x 1 x i32> @intrinsic_vwsubu_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i32_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16(
 define <vscale x 2 x i32> @intrinsic_vwsubu_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i32_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -788,7 +788,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16(
 define <vscale x 4 x i32> @intrinsic_vwsubu_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i32_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -828,7 +828,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16(
 define <vscale x 8 x i32> @intrinsic_vwsubu_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i32_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -868,7 +868,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16(
 define <vscale x 16 x i32> @intrinsic_vwsubu_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv16i32_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll
index 221bc10b3898..3700f2405ff6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8(
 define <vscale x 1 x i16> @intrinsic_vwsubu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i16_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8(
 define <vscale x 2 x i16> @intrinsic_vwsubu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i16_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vwsubu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i16_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8(
 define <vscale x 8 x i16> @intrinsic_vwsubu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i16_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8(
 define <vscale x 16 x i16> @intrinsic_vwsubu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv16i16_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vwsubu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv32i16_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16(
 define <vscale x 1 x i32> @intrinsic_vwsubu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i32_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vwsubu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i32_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16(
 define <vscale x 4 x i32> @intrinsic_vwsubu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i32_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16(
 define <vscale x 8 x i32> @intrinsic_vwsubu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i32_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16(
 define <vscale x 16 x i32> @intrinsic_vwsubu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv16i32_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32(
 define <vscale x 1 x i64> @intrinsic_vwsubu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i64_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32(
 define <vscale x 2 x i64> @intrinsic_vwsubu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i64_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32(
 define <vscale x 4 x i64> @intrinsic_vwsubu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i64_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32(
 define <vscale x 8 x i64> @intrinsic_vwsubu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i64_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -628,7 +628,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8(
 define <vscale x 1 x i16> @intrinsic_vwsubu_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i16_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -668,7 +668,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8(
 define <vscale x 2 x i16> @intrinsic_vwsubu_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i16_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -708,7 +708,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8(
 define <vscale x 4 x i16> @intrinsic_vwsubu_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i16_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -748,7 +748,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8(
 define <vscale x 8 x i16> @intrinsic_vwsubu_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i16_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -788,7 +788,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8(
 define <vscale x 16 x i16> @intrinsic_vwsubu_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv16i16_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -828,7 +828,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8(
 define <vscale x 32 x i16> @intrinsic_vwsubu_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv32i16_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -868,7 +868,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16(
 define <vscale x 1 x i32> @intrinsic_vwsubu_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i32_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -908,7 +908,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16(
 define <vscale x 2 x i32> @intrinsic_vwsubu_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i32_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -948,7 +948,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16(
 define <vscale x 4 x i32> @intrinsic_vwsubu_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i32_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -988,7 +988,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16(
 define <vscale x 8 x i32> @intrinsic_vwsubu_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i32_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16(
 define <vscale x 16 x i32> @intrinsic_vwsubu_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv16i32_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32(
 define <vscale x 1 x i64> @intrinsic_vwsubu_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i64_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32(
 define <vscale x 2 x i64> @intrinsic_vwsubu_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i64_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32(
 define <vscale x 4 x i64> @intrinsic_vwsubu_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i64_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32(
 define <vscale x 8 x i64> @intrinsic_vwsubu_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i64_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
index 4c35bec0258c..851fff7bb207 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8(
 define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8(
 define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8(
 define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8(
 define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16(
 define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16(
 define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16(
 define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16(
 define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.i8(
 define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.i8(
 define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.i8(
 define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.i8(
 define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -628,7 +628,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.i8(
 define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -668,7 +668,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.i8(
 define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -708,7 +708,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.i16(
 define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.i16(
 define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -788,7 +788,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.i16(
 define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -828,7 +828,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.i16(
 define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -868,7 +868,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.i16(
 define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
index 41d8e55fb15d..fe42cc3d5744 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8(
 define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8(
 define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8(
 define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8(
 define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16(
 define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16(
 define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16(
 define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16(
 define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32(
 define <vscale x 1 x i64> @intrinsic_vwsubu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32(
 define <vscale x 2 x i64> @intrinsic_vwsubu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32(
 define <vscale x 4 x i64> @intrinsic_vwsubu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32(
 define <vscale x 8 x i64> @intrinsic_vwsubu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -628,7 +628,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.i8(
 define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -668,7 +668,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.i8(
 define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -708,7 +708,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.i8(
 define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -748,7 +748,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.i8(
 define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -788,7 +788,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.i8(
 define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -828,7 +828,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.i8(
 define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -868,7 +868,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.i16(
 define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -908,7 +908,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.i16(
 define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -948,7 +948,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.i16(
 define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -988,7 +988,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.i16(
 define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.i16(
 define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.i32(
 define <vscale x 1 x i64> @intrinsic_vwsubu.w_mask_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i64_nxv1i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.i32(
 define <vscale x 2 x i64> @intrinsic_vwsubu.w_mask_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i64_nxv2i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.i32(
 define <vscale x 4 x i64> @intrinsic_vwsubu.w_mask_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i64_nxv4i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.i32(
 define <vscale x 8 x i64> @intrinsic_vwsubu.w_mask_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i64_nxv8i64_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,

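For context, a minimal sketch (not part of this patch, using illustrative
registers t0/a0/a1/v8/v9) of what the ta -> tu flip in these tests means at
the instruction level, assuming vl is smaller than VLMAX so tail elements
exist:

    # Suppose vl = 4 and VLMAX = 8: elements 4..7 of v8 are tail elements.
    vsetvli t0, a0, e32,m1,tu,mu    # tu: tail elements of the destination are preserved
    vxor.vx v8, v9, a1, v0.t        # masked op: v8[4..7] keep their prior contents

With ta instead of tu, a conforming implementation is allowed to overwrite
v8[4..7] with all ones, so a caller who passed a tied destination operand
would lose control of those elements; tu guarantees they stay undisturbed.
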
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll
index ee631bd6623b..d7cce7300274 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -908,7 +908,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1456,7 +1456,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vxor_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1484,7 +1484,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vxor_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1512,7 +1512,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vxor_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1540,7 +1540,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vxor_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1568,7 +1568,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vxor_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1596,7 +1596,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vxor_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1624,7 +1624,7 @@ entry:
 define <vscale x 64 x i8> @intrinsic_vxor_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1652,7 +1652,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vxor_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1680,7 +1680,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vxor_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1708,7 +1708,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vxor_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1736,7 +1736,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vxor_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1764,7 +1764,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vxor_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1792,7 +1792,7 @@ entry:
 define <vscale x 32 x i16> @intrinsic_vxor_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1820,7 +1820,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vxor_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1848,7 +1848,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vxor_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1876,7 +1876,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vxor_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1904,7 +1904,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vxor_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1932,7 +1932,7 @@ entry:
 define <vscale x 16 x i32> @intrinsic_vxor_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll
index 5395e42429d0..a346770dec49 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -268,7 +268,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -308,7 +308,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -348,7 +348,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -388,7 +388,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -428,7 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -468,7 +468,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -548,7 +548,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -588,7 +588,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -628,7 +628,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -668,7 +668,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -708,7 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -748,7 +748,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -788,7 +788,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -828,7 +828,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -868,7 +868,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vxor.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -908,7 +908,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -948,7 +948,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -988,7 +988,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1028,7 +1028,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1068,7 +1068,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1108,7 +1108,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1148,7 +1148,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1188,7 +1188,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1228,7 +1228,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1268,7 +1268,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1308,7 +1308,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1348,7 +1348,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1388,7 +1388,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1428,7 +1428,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1468,7 +1468,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1508,7 +1508,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1548,7 +1548,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1588,7 +1588,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1628,7 +1628,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1668,7 +1668,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1708,7 +1708,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1748,7 +1748,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vxor.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1776,7 +1776,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vxor_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i8_nxv1i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1804,7 +1804,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vxor_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i8_nxv2i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1832,7 +1832,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vxor_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i8_nxv4i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1860,7 +1860,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vxor_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i8_nxv8i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1888,7 +1888,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vxor_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i8_nxv16i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1916,7 +1916,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vxor_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv32i8_nxv32i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1944,7 +1944,7 @@ entry:
 define <vscale x 64 x i8> @intrinsic_vxor_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv64i8_nxv64i8_i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1972,7 +1972,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vxor_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i16_nxv1i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2000,7 +2000,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vxor_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i16_nxv2i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2028,7 +2028,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vxor_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i16_nxv4i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2056,7 +2056,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vxor_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i16_nxv8i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2084,7 +2084,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vxor_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i16_nxv16i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2112,7 +2112,7 @@ entry:
 define <vscale x 32 x i16> @intrinsic_vxor_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv32i16_nxv32i16_i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2140,7 +2140,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vxor_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i32_nxv1i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2168,7 +2168,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vxor_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i32_nxv2i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2196,7 +2196,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vxor_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i32_nxv4i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2224,7 +2224,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vxor_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i32_nxv8i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2252,7 +2252,7 @@ entry:
 define <vscale x 16 x i32> @intrinsic_vxor_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i32_nxv16i32_i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2280,7 +2280,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vxor_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i64_nxv1i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2308,7 +2308,7 @@ entry:
 define <vscale x 2 x i64> @intrinsic_vxor_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i64_nxv2i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2336,7 +2336,7 @@ entry:
 define <vscale x 4 x i64> @intrinsic_vxor_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i64_nxv4i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2364,7 +2364,7 @@ entry:
 define <vscale x 8 x i64> @intrinsic_vxor_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
 entry:
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i64_nxv8i64_i64
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
 ; CHECK:       vxor.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,