[clang] ffe03ff - [RISCV] Fix incorrect policy implementation for unmasked vslidedown and vslideup.

Zakk Chen via cfe-commits cfe-commits at lists.llvm.org
Mon Apr 25 09:20:22 PDT 2022


Author: Zakk Chen
Date: 2022-04-25T09:18:41-07:00
New Revision: ffe03ff75c26d598ce71d19c5eacf528f63a670b

URL: https://github.com/llvm/llvm-project/commit/ffe03ff75c26d598ce71d19c5eacf528f63a670b
DIFF: https://github.com/llvm/llvm-project/commit/ffe03ff75c26d598ce71d19c5eacf528f63a670b.diff

LOG: [RISCV] Fix incorrect policy implementation for unmasked vslidedown and vslideup.

vslideup works by leaving elements 0<i<OFFSET undisturbed,
so it needs the destination operand as input for correctness
regardless of policy. Add an operand to indicate the policy.

We also add a policy operand to unmasked vslidedown to keep the interface consistent with vslideup,
because vslidedown only leaves elements 0<i<vstart undisturbed and the user has no way to control vstart.

Reviewed By: rogfer01, craig.topper

Differential Revision: https://reviews.llvm.org/D124186
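To illustrate why the destination operand (and hence the new policy operand) matters, here is a
minimal sketch mirroring the C tests updated below. It is not part of this commit; the wrapper
name is made up, and it assumes the pre-"__riscv_"-prefix intrinsic naming used by these tests.

    #include <riscv_vector.h>

    // Hypothetical wrapper (compile with a -march that enables the V extension).
    // The unmasked intrinsic takes the destination vector first: result elements
    // below `offset` come from `dst` and must be preserved regardless of tail
    // policy, which is why the underlying IR intrinsic now carries a trailing
    // policy operand (the added `i64 0` visible in the CHECK lines below).
    vint8m1_t slide_up_by(vint8m1_t dst, vint8m1_t src, size_t offset, size_t vl) {
      return vslideup_vx_i8m1(dst, src, offset, vl);
    }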

Added: 
    

Modified: 
    clang/include/clang/Basic/riscv_vector.td
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c
    llvm/include/llvm/IR/IntrinsicsRISCV.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
    llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir
    llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
    llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
    llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll
    llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
    llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
    llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll
    llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll
    llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
    llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 4f825ea2e25b6..70fd5c82c612e 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -443,7 +443,8 @@ class RVVMaskOp0Builtin<string prototype> : RVVOp0Builtin<"m", prototype, "c"> {
   let HasMaskedOffOperand = false;
 }
 
-let HasMaskedOffOperand = false in {
+let UnMaskedPolicy = HasPolicyOperand,
+    HasMaskedOffOperand = false in {
   multiclass RVVSlideBuiltinSet {
     defm "" : RVVOutBuiltinSet<NAME, "csilxfd",
                                [["vx","v", "vvvz"]]>;

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c
index ed45f08c94b04..04c8710d81c6b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vslidedown_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src,
@@ -17,7 +17,7 @@ vint8mf8_t test_vslidedown_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vslidedown_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src,
@@ -27,7 +27,7 @@ vint8mf4_t test_vslidedown_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vslidedown_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src,
@@ -37,7 +37,7 @@ vint8mf2_t test_vslidedown_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vslidedown_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset,
@@ -47,7 +47,7 @@ vint8m1_t test_vslidedown_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vslidedown_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset,
@@ -57,7 +57,7 @@ vint8m2_t test_vslidedown_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vslidedown_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset,
@@ -67,7 +67,7 @@ vint8m4_t test_vslidedown_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vslidedown_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset,
@@ -77,7 +77,7 @@ vint8m8_t test_vslidedown_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vslidedown_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src,
@@ -87,7 +87,7 @@ vint16mf4_t test_vslidedown_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vslidedown_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src,
@@ -97,7 +97,7 @@ vint16mf2_t test_vslidedown_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vslidedown_vx_i16m1(vint16m1_t dst, vint16m1_t src,
@@ -107,7 +107,7 @@ vint16m1_t test_vslidedown_vx_i16m1(vint16m1_t dst, vint16m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vslidedown_vx_i16m2(vint16m2_t dst, vint16m2_t src,
@@ -117,7 +117,7 @@ vint16m2_t test_vslidedown_vx_i16m2(vint16m2_t dst, vint16m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vslidedown_vx_i16m4(vint16m4_t dst, vint16m4_t src,
@@ -127,7 +127,7 @@ vint16m4_t test_vslidedown_vx_i16m4(vint16m4_t dst, vint16m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vslidedown_vx_i16m8(vint16m8_t dst, vint16m8_t src,
@@ -137,7 +137,7 @@ vint16m8_t test_vslidedown_vx_i16m8(vint16m8_t dst, vint16m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vslidedown_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src,
@@ -147,7 +147,7 @@ vint32mf2_t test_vslidedown_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vslidedown_vx_i32m1(vint32m1_t dst, vint32m1_t src,
@@ -157,7 +157,7 @@ vint32m1_t test_vslidedown_vx_i32m1(vint32m1_t dst, vint32m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vslidedown_vx_i32m2(vint32m2_t dst, vint32m2_t src,
@@ -167,7 +167,7 @@ vint32m2_t test_vslidedown_vx_i32m2(vint32m2_t dst, vint32m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vslidedown_vx_i32m4(vint32m4_t dst, vint32m4_t src,
@@ -177,7 +177,7 @@ vint32m4_t test_vslidedown_vx_i32m4(vint32m4_t dst, vint32m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vslidedown_vx_i32m8(vint32m8_t dst, vint32m8_t src,
@@ -187,7 +187,7 @@ vint32m8_t test_vslidedown_vx_i32m8(vint32m8_t dst, vint32m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vslidedown_vx_i64m1(vint64m1_t dst, vint64m1_t src,
@@ -197,7 +197,7 @@ vint64m1_t test_vslidedown_vx_i64m1(vint64m1_t dst, vint64m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vslidedown_vx_i64m2(vint64m2_t dst, vint64m2_t src,
@@ -207,7 +207,7 @@ vint64m2_t test_vslidedown_vx_i64m2(vint64m2_t dst, vint64m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vslidedown_vx_i64m4(vint64m4_t dst, vint64m4_t src,
@@ -217,7 +217,7 @@ vint64m4_t test_vslidedown_vx_i64m4(vint64m4_t dst, vint64m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vslidedown_vx_i64m8(vint64m8_t dst, vint64m8_t src,
@@ -227,7 +227,7 @@ vint64m8_t test_vslidedown_vx_i64m8(vint64m8_t dst, vint64m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vslidedown_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src,
@@ -237,7 +237,7 @@ vuint8mf8_t test_vslidedown_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vslidedown_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src,
@@ -247,7 +247,7 @@ vuint8mf4_t test_vslidedown_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vslidedown_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src,
@@ -257,7 +257,7 @@ vuint8mf2_t test_vslidedown_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vslidedown_vx_u8m1(vuint8m1_t dst, vuint8m1_t src,
@@ -267,7 +267,7 @@ vuint8m1_t test_vslidedown_vx_u8m1(vuint8m1_t dst, vuint8m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vslidedown_vx_u8m2(vuint8m2_t dst, vuint8m2_t src,
@@ -277,7 +277,7 @@ vuint8m2_t test_vslidedown_vx_u8m2(vuint8m2_t dst, vuint8m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vslidedown_vx_u8m4(vuint8m4_t dst, vuint8m4_t src,
@@ -287,7 +287,7 @@ vuint8m4_t test_vslidedown_vx_u8m4(vuint8m4_t dst, vuint8m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vslidedown_vx_u8m8(vuint8m8_t dst, vuint8m8_t src,
@@ -297,7 +297,7 @@ vuint8m8_t test_vslidedown_vx_u8m8(vuint8m8_t dst, vuint8m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vslidedown_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src,
@@ -307,7 +307,7 @@ vuint16mf4_t test_vslidedown_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vslidedown_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src,
@@ -317,7 +317,7 @@ vuint16mf2_t test_vslidedown_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vslidedown_vx_u16m1(vuint16m1_t dst, vuint16m1_t src,
@@ -327,7 +327,7 @@ vuint16m1_t test_vslidedown_vx_u16m1(vuint16m1_t dst, vuint16m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vslidedown_vx_u16m2(vuint16m2_t dst, vuint16m2_t src,
@@ -337,7 +337,7 @@ vuint16m2_t test_vslidedown_vx_u16m2(vuint16m2_t dst, vuint16m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vslidedown_vx_u16m4(vuint16m4_t dst, vuint16m4_t src,
@@ -347,7 +347,7 @@ vuint16m4_t test_vslidedown_vx_u16m4(vuint16m4_t dst, vuint16m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vslidedown_vx_u16m8(vuint16m8_t dst, vuint16m8_t src,
@@ -357,7 +357,7 @@ vuint16m8_t test_vslidedown_vx_u16m8(vuint16m8_t dst, vuint16m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vslidedown_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src,
@@ -367,7 +367,7 @@ vuint32mf2_t test_vslidedown_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vslidedown_vx_u32m1(vuint32m1_t dst, vuint32m1_t src,
@@ -377,7 +377,7 @@ vuint32m1_t test_vslidedown_vx_u32m1(vuint32m1_t dst, vuint32m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vslidedown_vx_u32m2(vuint32m2_t dst, vuint32m2_t src,
@@ -387,7 +387,7 @@ vuint32m2_t test_vslidedown_vx_u32m2(vuint32m2_t dst, vuint32m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vslidedown_vx_u32m4(vuint32m4_t dst, vuint32m4_t src,
@@ -397,7 +397,7 @@ vuint32m4_t test_vslidedown_vx_u32m4(vuint32m4_t dst, vuint32m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vslidedown_vx_u32m8(vuint32m8_t dst, vuint32m8_t src,
@@ -407,7 +407,7 @@ vuint32m8_t test_vslidedown_vx_u32m8(vuint32m8_t dst, vuint32m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vslidedown_vx_u64m1(vuint64m1_t dst, vuint64m1_t src,
@@ -417,7 +417,7 @@ vuint64m1_t test_vslidedown_vx_u64m1(vuint64m1_t dst, vuint64m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vslidedown_vx_u64m2(vuint64m2_t dst, vuint64m2_t src,
@@ -427,7 +427,7 @@ vuint64m2_t test_vslidedown_vx_u64m2(vuint64m2_t dst, vuint64m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vslidedown_vx_u64m4(vuint64m4_t dst, vuint64m4_t src,
@@ -437,7 +437,7 @@ vuint64m4_t test_vslidedown_vx_u64m4(vuint64m4_t dst, vuint64m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t dst, vuint64m8_t src,
@@ -447,7 +447,7 @@ vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t dst, vuint64m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vslidedown_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src,
@@ -457,7 +457,7 @@ vfloat32mf2_t test_vslidedown_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vslidedown_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src,
@@ -467,7 +467,7 @@ vfloat32m1_t test_vslidedown_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vslidedown_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src,
@@ -477,7 +477,7 @@ vfloat32m2_t test_vslidedown_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vslidedown_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src,
@@ -487,7 +487,7 @@ vfloat32m4_t test_vslidedown_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vslidedown_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src,
@@ -497,7 +497,7 @@ vfloat32m8_t test_vslidedown_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vslidedown_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src,
@@ -507,7 +507,7 @@ vfloat64m1_t test_vslidedown_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vslidedown_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src,
@@ -517,7 +517,7 @@ vfloat64m2_t test_vslidedown_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vslidedown_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src,
@@ -527,7 +527,7 @@ vfloat64m4_t test_vslidedown_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src,

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c
index 380296f0e4946..0d082ab8f5bb0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src, size_t offset,
@@ -17,7 +17,7 @@ vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src, size_t offset,
@@ -27,7 +27,7 @@ vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src, size_t offset,
@@ -37,7 +37,7 @@ vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vslideup_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset,
@@ -47,7 +47,7 @@ vint8m1_t test_vslideup_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vslideup_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset,
@@ -57,7 +57,7 @@ vint8m2_t test_vslideup_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vslideup_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset,
@@ -67,7 +67,7 @@ vint8m4_t test_vslideup_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vslideup_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset,
@@ -77,7 +77,7 @@ vint8m8_t test_vslideup_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src,
@@ -87,7 +87,7 @@ vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src,
@@ -97,7 +97,7 @@ vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vslideup_vx_i16m1(vint16m1_t dst, vint16m1_t src, size_t offset,
@@ -107,7 +107,7 @@ vint16m1_t test_vslideup_vx_i16m1(vint16m1_t dst, vint16m1_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vslideup_vx_i16m2(vint16m2_t dst, vint16m2_t src, size_t offset,
@@ -117,7 +117,7 @@ vint16m2_t test_vslideup_vx_i16m2(vint16m2_t dst, vint16m2_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vslideup_vx_i16m4(vint16m4_t dst, vint16m4_t src, size_t offset,
@@ -127,7 +127,7 @@ vint16m4_t test_vslideup_vx_i16m4(vint16m4_t dst, vint16m4_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vslideup_vx_i16m8(vint16m8_t dst, vint16m8_t src, size_t offset,
@@ -137,7 +137,7 @@ vint16m8_t test_vslideup_vx_i16m8(vint16m8_t dst, vint16m8_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src,
@@ -147,7 +147,7 @@ vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vslideup_vx_i32m1(vint32m1_t dst, vint32m1_t src, size_t offset,
@@ -157,7 +157,7 @@ vint32m1_t test_vslideup_vx_i32m1(vint32m1_t dst, vint32m1_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vslideup_vx_i32m2(vint32m2_t dst, vint32m2_t src, size_t offset,
@@ -167,7 +167,7 @@ vint32m2_t test_vslideup_vx_i32m2(vint32m2_t dst, vint32m2_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vslideup_vx_i32m4(vint32m4_t dst, vint32m4_t src, size_t offset,
@@ -177,7 +177,7 @@ vint32m4_t test_vslideup_vx_i32m4(vint32m4_t dst, vint32m4_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vslideup_vx_i32m8(vint32m8_t dst, vint32m8_t src, size_t offset,
@@ -187,7 +187,7 @@ vint32m8_t test_vslideup_vx_i32m8(vint32m8_t dst, vint32m8_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vslideup_vx_i64m1(vint64m1_t dst, vint64m1_t src, size_t offset,
@@ -197,7 +197,7 @@ vint64m1_t test_vslideup_vx_i64m1(vint64m1_t dst, vint64m1_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vslideup_vx_i64m2(vint64m2_t dst, vint64m2_t src, size_t offset,
@@ -207,7 +207,7 @@ vint64m2_t test_vslideup_vx_i64m2(vint64m2_t dst, vint64m2_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vslideup_vx_i64m4(vint64m4_t dst, vint64m4_t src, size_t offset,
@@ -217,7 +217,7 @@ vint64m4_t test_vslideup_vx_i64m4(vint64m4_t dst, vint64m4_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vslideup_vx_i64m8(vint64m8_t dst, vint64m8_t src, size_t offset,
@@ -227,7 +227,7 @@ vint64m8_t test_vslideup_vx_i64m8(vint64m8_t dst, vint64m8_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src,
@@ -237,7 +237,7 @@ vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src,
@@ -247,7 +247,7 @@ vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src,
@@ -257,7 +257,7 @@ vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t dst, vuint8m1_t src, size_t offset,
@@ -267,7 +267,7 @@ vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t dst, vuint8m1_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t dst, vuint8m2_t src, size_t offset,
@@ -277,7 +277,7 @@ vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t dst, vuint8m2_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t dst, vuint8m4_t src, size_t offset,
@@ -287,7 +287,7 @@ vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t dst, vuint8m4_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t dst, vuint8m8_t src, size_t offset,
@@ -297,7 +297,7 @@ vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t dst, vuint8m8_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src,
@@ -307,7 +307,7 @@ vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src,
@@ -317,7 +317,7 @@ vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t dst, vuint16m1_t src,
@@ -327,7 +327,7 @@ vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t dst, vuint16m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t dst, vuint16m2_t src,
@@ -337,7 +337,7 @@ vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t dst, vuint16m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t dst, vuint16m4_t src,
@@ -347,7 +347,7 @@ vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t dst, vuint16m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t dst, vuint16m8_t src,
@@ -357,7 +357,7 @@ vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t dst, vuint16m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src,
@@ -367,7 +367,7 @@ vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t dst, vuint32m1_t src,
@@ -377,7 +377,7 @@ vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t dst, vuint32m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t dst, vuint32m2_t src,
@@ -387,7 +387,7 @@ vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t dst, vuint32m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t dst, vuint32m4_t src,
@@ -397,7 +397,7 @@ vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t dst, vuint32m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t dst, vuint32m8_t src,
@@ -407,7 +407,7 @@ vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t dst, vuint32m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t dst, vuint64m1_t src,
@@ -417,7 +417,7 @@ vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t dst, vuint64m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t dst, vuint64m2_t src,
@@ -427,7 +427,7 @@ vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t dst, vuint64m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t dst, vuint64m4_t src,
@@ -437,7 +437,7 @@ vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t dst, vuint64m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vslideup_vx_u64m8(vuint64m8_t dst, vuint64m8_t src,
@@ -447,7 +447,7 @@ vuint64m8_t test_vslideup_vx_u64m8(vuint64m8_t dst, vuint64m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src,
@@ -457,7 +457,7 @@ vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src,
@@ -467,7 +467,7 @@ vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src,
@@ -477,7 +477,7 @@ vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src,
@@ -487,7 +487,7 @@ vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src,
@@ -497,7 +497,7 @@ vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src,
@@ -507,7 +507,7 @@ vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src,
@@ -517,7 +517,7 @@ vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src,
@@ -527,7 +527,7 @@ vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src,

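For context, a minimal C sketch (not part of the patch itself) of the unmasked call shape these CHECK lines verify; the only assumption is the same vslideup_vx_i32m2 prototype already used by the test functions above. With this change, codegen for such a call emits the @llvm.riscv.vslideup intrinsic with the trailing "i64 0" policy operand shown in the updated CHECK lines.

    // Sketch only: mirrors the shape of the test_vslideup_vx_* functions above.
    // Assumes the vslideup_vx_i32m2 prototype used in these tests.
    #include <riscv_vector.h>

    vint32m2_t slideup_example(vint32m2_t dst, vint32m2_t src, size_t offset,
                               size_t vl) {
      // After this patch, the generated IR call carries an extra i64 0 policy
      // operand, matching the '+' CHECK-RV64 lines in the diff above.
      return vslideup_vx_i32m2(dst, src, offset, vl);
    }
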
diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c
index 7e72e1c75879d..774d51f3f9e38 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vslidedown_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src,
@@ -18,7 +18,7 @@ vint8mf8_t test_vslidedown_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vslidedown_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src,
@@ -28,7 +28,7 @@ vint8mf4_t test_vslidedown_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vslidedown_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src,
@@ -38,7 +38,7 @@ vint8mf2_t test_vslidedown_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vslidedown_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset,
@@ -48,7 +48,7 @@ vint8m1_t test_vslidedown_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vslidedown_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset,
@@ -58,7 +58,7 @@ vint8m2_t test_vslidedown_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vslidedown_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset,
@@ -68,7 +68,7 @@ vint8m4_t test_vslidedown_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vslidedown_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset,
@@ -78,7 +78,7 @@ vint8m8_t test_vslidedown_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vslidedown_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src,
@@ -88,7 +88,7 @@ vint16mf4_t test_vslidedown_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vslidedown_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src,
@@ -98,7 +98,7 @@ vint16mf2_t test_vslidedown_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vslidedown_vx_i16m1(vint16m1_t dst, vint16m1_t src,
@@ -108,7 +108,7 @@ vint16m1_t test_vslidedown_vx_i16m1(vint16m1_t dst, vint16m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vslidedown_vx_i16m2(vint16m2_t dst, vint16m2_t src,
@@ -118,7 +118,7 @@ vint16m2_t test_vslidedown_vx_i16m2(vint16m2_t dst, vint16m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vslidedown_vx_i16m4(vint16m4_t dst, vint16m4_t src,
@@ -128,7 +128,7 @@ vint16m4_t test_vslidedown_vx_i16m4(vint16m4_t dst, vint16m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vslidedown_vx_i16m8(vint16m8_t dst, vint16m8_t src,
@@ -138,7 +138,7 @@ vint16m8_t test_vslidedown_vx_i16m8(vint16m8_t dst, vint16m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vslidedown_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src,
@@ -148,7 +148,7 @@ vint32mf2_t test_vslidedown_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vslidedown_vx_i32m1(vint32m1_t dst, vint32m1_t src,
@@ -158,7 +158,7 @@ vint32m1_t test_vslidedown_vx_i32m1(vint32m1_t dst, vint32m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vslidedown_vx_i32m2(vint32m2_t dst, vint32m2_t src,
@@ -168,7 +168,7 @@ vint32m2_t test_vslidedown_vx_i32m2(vint32m2_t dst, vint32m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vslidedown_vx_i32m4(vint32m4_t dst, vint32m4_t src,
@@ -178,7 +178,7 @@ vint32m4_t test_vslidedown_vx_i32m4(vint32m4_t dst, vint32m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vslidedown_vx_i32m8(vint32m8_t dst, vint32m8_t src,
@@ -188,7 +188,7 @@ vint32m8_t test_vslidedown_vx_i32m8(vint32m8_t dst, vint32m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vslidedown_vx_i64m1(vint64m1_t dst, vint64m1_t src,
@@ -198,7 +198,7 @@ vint64m1_t test_vslidedown_vx_i64m1(vint64m1_t dst, vint64m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vslidedown_vx_i64m2(vint64m2_t dst, vint64m2_t src,
@@ -208,7 +208,7 @@ vint64m2_t test_vslidedown_vx_i64m2(vint64m2_t dst, vint64m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vslidedown_vx_i64m4(vint64m4_t dst, vint64m4_t src,
@@ -218,7 +218,7 @@ vint64m4_t test_vslidedown_vx_i64m4(vint64m4_t dst, vint64m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vslidedown_vx_i64m8(vint64m8_t dst, vint64m8_t src,
@@ -228,7 +228,7 @@ vint64m8_t test_vslidedown_vx_i64m8(vint64m8_t dst, vint64m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vslidedown_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src,
@@ -238,7 +238,7 @@ vuint8mf8_t test_vslidedown_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vslidedown_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src,
@@ -248,7 +248,7 @@ vuint8mf4_t test_vslidedown_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vslidedown_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src,
@@ -258,7 +258,7 @@ vuint8mf2_t test_vslidedown_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vslidedown_vx_u8m1(vuint8m1_t dst, vuint8m1_t src,
@@ -268,7 +268,7 @@ vuint8m1_t test_vslidedown_vx_u8m1(vuint8m1_t dst, vuint8m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vslidedown_vx_u8m2(vuint8m2_t dst, vuint8m2_t src,
@@ -278,7 +278,7 @@ vuint8m2_t test_vslidedown_vx_u8m2(vuint8m2_t dst, vuint8m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vslidedown_vx_u8m4(vuint8m4_t dst, vuint8m4_t src,
@@ -288,7 +288,7 @@ vuint8m4_t test_vslidedown_vx_u8m4(vuint8m4_t dst, vuint8m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vslidedown_vx_u8m8(vuint8m8_t dst, vuint8m8_t src,
@@ -298,7 +298,7 @@ vuint8m8_t test_vslidedown_vx_u8m8(vuint8m8_t dst, vuint8m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vslidedown_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src,
@@ -308,7 +308,7 @@ vuint16mf4_t test_vslidedown_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vslidedown_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src,
@@ -318,7 +318,7 @@ vuint16mf2_t test_vslidedown_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vslidedown_vx_u16m1(vuint16m1_t dst, vuint16m1_t src,
@@ -328,7 +328,7 @@ vuint16m1_t test_vslidedown_vx_u16m1(vuint16m1_t dst, vuint16m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vslidedown_vx_u16m2(vuint16m2_t dst, vuint16m2_t src,
@@ -338,7 +338,7 @@ vuint16m2_t test_vslidedown_vx_u16m2(vuint16m2_t dst, vuint16m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vslidedown_vx_u16m4(vuint16m4_t dst, vuint16m4_t src,
@@ -348,7 +348,7 @@ vuint16m4_t test_vslidedown_vx_u16m4(vuint16m4_t dst, vuint16m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vslidedown_vx_u16m8(vuint16m8_t dst, vuint16m8_t src,
@@ -358,7 +358,7 @@ vuint16m8_t test_vslidedown_vx_u16m8(vuint16m8_t dst, vuint16m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vslidedown_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src,
@@ -368,7 +368,7 @@ vuint32mf2_t test_vslidedown_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vslidedown_vx_u32m1(vuint32m1_t dst, vuint32m1_t src,
@@ -378,7 +378,7 @@ vuint32m1_t test_vslidedown_vx_u32m1(vuint32m1_t dst, vuint32m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vslidedown_vx_u32m2(vuint32m2_t dst, vuint32m2_t src,
@@ -388,7 +388,7 @@ vuint32m2_t test_vslidedown_vx_u32m2(vuint32m2_t dst, vuint32m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vslidedown_vx_u32m4(vuint32m4_t dst, vuint32m4_t src,
@@ -398,7 +398,7 @@ vuint32m4_t test_vslidedown_vx_u32m4(vuint32m4_t dst, vuint32m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vslidedown_vx_u32m8(vuint32m8_t dst, vuint32m8_t src,
@@ -408,7 +408,7 @@ vuint32m8_t test_vslidedown_vx_u32m8(vuint32m8_t dst, vuint32m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vslidedown_vx_u64m1(vuint64m1_t dst, vuint64m1_t src,
@@ -418,7 +418,7 @@ vuint64m1_t test_vslidedown_vx_u64m1(vuint64m1_t dst, vuint64m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vslidedown_vx_u64m2(vuint64m2_t dst, vuint64m2_t src,
@@ -428,7 +428,7 @@ vuint64m2_t test_vslidedown_vx_u64m2(vuint64m2_t dst, vuint64m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vslidedown_vx_u64m4(vuint64m4_t dst, vuint64m4_t src,
@@ -438,7 +438,7 @@ vuint64m4_t test_vslidedown_vx_u64m4(vuint64m4_t dst, vuint64m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t dst, vuint64m8_t src,
@@ -448,7 +448,7 @@ vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t dst, vuint64m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vslidedown_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src,
@@ -458,7 +458,7 @@ vfloat32mf2_t test_vslidedown_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vslidedown_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src,
@@ -468,7 +468,7 @@ vfloat32m1_t test_vslidedown_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vslidedown_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src,
@@ -478,7 +478,7 @@ vfloat32m2_t test_vslidedown_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vslidedown_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src,
@@ -488,7 +488,7 @@ vfloat32m4_t test_vslidedown_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vslidedown_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src,
@@ -498,7 +498,7 @@ vfloat32m8_t test_vslidedown_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vslidedown_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src,
@@ -508,7 +508,7 @@ vfloat64m1_t test_vslidedown_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vslidedown_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src,
@@ -518,7 +518,7 @@ vfloat64m2_t test_vslidedown_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vslidedown_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src,
@@ -528,7 +528,7 @@ vfloat64m4_t test_vslidedown_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src,
@@ -1113,7 +1113,7 @@ vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vslidedown_vx_f16mf4 (vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
@@ -1122,7 +1122,7 @@ vfloat16mf4_t test_vslidedown_vx_f16mf4 (vfloat16mf4_t dest, vfloat16mf4_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vslidedown_vx_f16mf2 (vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
@@ -1131,7 +1131,7 @@ vfloat16mf2_t test_vslidedown_vx_f16mf2 (vfloat16mf2_t dest, vfloat16mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vslidedown_vx_f16m1 (vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
@@ -1140,7 +1140,7 @@ vfloat16m1_t test_vslidedown_vx_f16m1 (vfloat16m1_t dest, vfloat16m1_t src, size
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vslidedown_vx_f16m2 (vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
@@ -1149,7 +1149,7 @@ vfloat16m2_t test_vslidedown_vx_f16m2 (vfloat16m2_t dest, vfloat16m2_t src, size
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vslidedown_vx_f16m4 (vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
@@ -1158,7 +1158,7 @@ vfloat16m4_t test_vslidedown_vx_f16m4 (vfloat16m4_t dest, vfloat16m4_t src, size
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslidedown.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslidedown.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vslidedown_vx_f16m8 (vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c
index 8dc1dccafab02..da06400a6666f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src, size_t offset,
@@ -18,7 +18,7 @@ vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src, size_t offset,
@@ -28,7 +28,7 @@ vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src, size_t offset,
@@ -38,7 +38,7 @@ vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vslideup_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset,
@@ -48,7 +48,7 @@ vint8m1_t test_vslideup_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vslideup_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset,
@@ -58,7 +58,7 @@ vint8m2_t test_vslideup_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vslideup_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset,
@@ -68,7 +68,7 @@ vint8m4_t test_vslideup_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vslideup_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset,
@@ -78,7 +78,7 @@ vint8m8_t test_vslideup_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src,
@@ -88,7 +88,7 @@ vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src,
@@ -98,7 +98,7 @@ vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vslideup_vx_i16m1(vint16m1_t dst, vint16m1_t src, size_t offset,
@@ -108,7 +108,7 @@ vint16m1_t test_vslideup_vx_i16m1(vint16m1_t dst, vint16m1_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vslideup_vx_i16m2(vint16m2_t dst, vint16m2_t src, size_t offset,
@@ -118,7 +118,7 @@ vint16m2_t test_vslideup_vx_i16m2(vint16m2_t dst, vint16m2_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vslideup_vx_i16m4(vint16m4_t dst, vint16m4_t src, size_t offset,
@@ -128,7 +128,7 @@ vint16m4_t test_vslideup_vx_i16m4(vint16m4_t dst, vint16m4_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vslideup_vx_i16m8(vint16m8_t dst, vint16m8_t src, size_t offset,
@@ -138,7 +138,7 @@ vint16m8_t test_vslideup_vx_i16m8(vint16m8_t dst, vint16m8_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src,
@@ -148,7 +148,7 @@ vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vslideup_vx_i32m1(vint32m1_t dst, vint32m1_t src, size_t offset,
@@ -158,7 +158,7 @@ vint32m1_t test_vslideup_vx_i32m1(vint32m1_t dst, vint32m1_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vslideup_vx_i32m2(vint32m2_t dst, vint32m2_t src, size_t offset,
@@ -168,7 +168,7 @@ vint32m2_t test_vslideup_vx_i32m2(vint32m2_t dst, vint32m2_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vslideup_vx_i32m4(vint32m4_t dst, vint32m4_t src, size_t offset,
@@ -178,7 +178,7 @@ vint32m4_t test_vslideup_vx_i32m4(vint32m4_t dst, vint32m4_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vslideup_vx_i32m8(vint32m8_t dst, vint32m8_t src, size_t offset,
@@ -188,7 +188,7 @@ vint32m8_t test_vslideup_vx_i32m8(vint32m8_t dst, vint32m8_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vslideup_vx_i64m1(vint64m1_t dst, vint64m1_t src, size_t offset,
@@ -198,7 +198,7 @@ vint64m1_t test_vslideup_vx_i64m1(vint64m1_t dst, vint64m1_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vslideup_vx_i64m2(vint64m2_t dst, vint64m2_t src, size_t offset,
@@ -208,7 +208,7 @@ vint64m2_t test_vslideup_vx_i64m2(vint64m2_t dst, vint64m2_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vslideup_vx_i64m4(vint64m4_t dst, vint64m4_t src, size_t offset,
@@ -218,7 +218,7 @@ vint64m4_t test_vslideup_vx_i64m4(vint64m4_t dst, vint64m4_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vslideup_vx_i64m8(vint64m8_t dst, vint64m8_t src, size_t offset,
@@ -228,7 +228,7 @@ vint64m8_t test_vslideup_vx_i64m8(vint64m8_t dst, vint64m8_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src,
@@ -238,7 +238,7 @@ vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src,
@@ -248,7 +248,7 @@ vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src,
@@ -258,7 +258,7 @@ vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t dst, vuint8m1_t src, size_t offset,
@@ -268,7 +268,7 @@ vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t dst, vuint8m1_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t dst, vuint8m2_t src, size_t offset,
@@ -278,7 +278,7 @@ vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t dst, vuint8m2_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t dst, vuint8m4_t src, size_t offset,
@@ -288,7 +288,7 @@ vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t dst, vuint8m4_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t dst, vuint8m8_t src, size_t offset,
@@ -298,7 +298,7 @@ vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t dst, vuint8m8_t src, size_t offset,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src,
@@ -308,7 +308,7 @@ vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src,
@@ -318,7 +318,7 @@ vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t dst, vuint16m1_t src,
@@ -328,7 +328,7 @@ vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t dst, vuint16m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t dst, vuint16m2_t src,
@@ -338,7 +338,7 @@ vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t dst, vuint16m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t dst, vuint16m4_t src,
@@ -348,7 +348,7 @@ vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t dst, vuint16m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t dst, vuint16m8_t src,
@@ -358,7 +358,7 @@ vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t dst, vuint16m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src,
@@ -368,7 +368,7 @@ vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t dst, vuint32m1_t src,
@@ -378,7 +378,7 @@ vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t dst, vuint32m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t dst, vuint32m2_t src,
@@ -388,7 +388,7 @@ vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t dst, vuint32m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t dst, vuint32m4_t src,
@@ -398,7 +398,7 @@ vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t dst, vuint32m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t dst, vuint32m8_t src,
@@ -408,7 +408,7 @@ vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t dst, vuint32m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t dst, vuint64m1_t src,
@@ -418,7 +418,7 @@ vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t dst, vuint64m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t dst, vuint64m2_t src,
@@ -428,7 +428,7 @@ vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t dst, vuint64m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t dst, vuint64m4_t src,
@@ -438,7 +438,7 @@ vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t dst, vuint64m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vslideup_vx_u64m8(vuint64m8_t dst, vuint64m8_t src,
@@ -448,7 +448,7 @@ vuint64m8_t test_vslideup_vx_u64m8(vuint64m8_t dst, vuint64m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src,
@@ -458,7 +458,7 @@ vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src,
@@ -468,7 +468,7 @@ vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src,
@@ -478,7 +478,7 @@ vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src,
@@ -488,7 +488,7 @@ vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src,
@@ -498,7 +498,7 @@ vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src,
@@ -508,7 +508,7 @@ vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src,
@@ -518,7 +518,7 @@ vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src,
@@ -528,7 +528,7 @@ vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src,
@@ -1098,7 +1098,7 @@ vfloat64m8_t test_vslideup_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vslideup_vx_f16mf4 (vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
@@ -1107,7 +1107,7 @@ vfloat16mf4_t test_vslideup_vx_f16mf4 (vfloat16mf4_t dest, vfloat16mf4_t src, si
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vslideup_vx_f16mf2 (vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
@@ -1116,7 +1116,7 @@ vfloat16mf2_t test_vslideup_vx_f16mf2 (vfloat16mf2_t dest, vfloat16mf2_t src, si
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vslideup_vx_f16m1 (vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
@@ -1125,7 +1125,7 @@ vfloat16m1_t test_vslideup_vx_f16m1 (vfloat16m1_t dest, vfloat16m1_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vslideup_vx_f16m2 (vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
@@ -1134,7 +1134,7 @@ vfloat16m2_t test_vslideup_vx_f16m2 (vfloat16m2_t dest, vfloat16m2_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vslideup_vx_f16m4 (vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
@@ -1143,7 +1143,7 @@ vfloat16m4_t test_vslideup_vx_f16m4 (vfloat16m4_t dest, vfloat16m4_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslideup.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslideup.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vslideup_vx_f16m8 (vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {

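A note on the tests above: the C-level intrinsics keep their existing four-argument signatures; the new trailing policy immediate (the "i64 0" in the CHECK lines) only appears in the LLVM IR the builtins lower to. A minimal caller-side sketch, assuming a clang of this vintage with riscv_vector.h and the unprefixed intrinsic names used in these tests (the function name is illustrative):

#include <riscv_vector.h>
#include <stddef.h>

// Unmasked vslideup: dst is both an input and the destination operand, as in
// the test functions above. The policy operand (the i64 0 in the CHECK lines)
// is appended when this lowers to @llvm.riscv.vslideup and is not visible at
// the C level.
vint32m1_t prepend_prefix(vint32m1_t dst, vint32m1_t src,
                          size_t offset, size_t vl) {
  return vslideup_vx_i32m1(dst, src, offset, vl);
}
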
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 9286d30c7c284..e45b13b87bfed 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -672,16 +672,16 @@ let TargetPrefix = "riscv" in {
                     [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
     let VLOperand = 4;
   }
-  // Input: (vector_in, vector_in, vector_in/scalar_in, vl)
-  class RISCVTernaryAAAXUnMasked
+  // Input: (vector_in, vector_in, scalar_in, vl, policy)
+  class RVVSlideUnMasked
         : Intrinsic<[llvm_anyvector_ty],
                     [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
-                     LLVMMatchType<1>],
-                    [IntrNoMem]>, RISCVVIntrinsic {
+                     LLVMMatchType<1>, LLVMMatchType<1>],
+                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
     let VLOperand = 3;
   }
   // Input: (vector_in, vector_in, vector_in/scalar_in, mask, vl, policy)
-  class RISCVTernaryAAAXMasked
+  class RVVSlideMasked
         : Intrinsic<[llvm_anyvector_ty],
                     [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
@@ -1106,9 +1106,9 @@ let TargetPrefix = "riscv" in {
     def "int_riscv_" # NAME : RISCVSaturatingBinaryABShiftUnMasked;
     def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABShiftMasked;
   }
-  multiclass RISCVTernaryAAAX {
-    def "int_riscv_" # NAME : RISCVTernaryAAAXUnMasked;
-    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAAXMasked;
+  multiclass RVVSlide {
+    def "int_riscv_" # NAME : RVVSlideUnMasked;
+    def "int_riscv_" # NAME # "_mask" : RVVSlideMasked;
   }
   multiclass RISCVTernaryAAXA {
     def "int_riscv_" # NAME : RISCVTernaryAAXAUnMasked;
@@ -1355,8 +1355,8 @@ let TargetPrefix = "riscv" in {
 
   defm vfmerge : RISCVBinaryWithV0;
 
-  defm vslideup : RISCVTernaryAAAX;
-  defm vslidedown : RISCVTernaryAAAX;
+  defm vslideup : RVVSlide;
+  defm vslidedown : RVVSlide;
 
   defm vslide1up : RISCVBinaryAAX;
   defm vslide1down : RISCVBinaryAAX;

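For comparison at the source level: the masked builtins (the *_m tests referenced in the hunk headers above) already go through a class whose input list ends in a policy operand, and RVVSlideUnMasked now mirrors that shape. A hedged sketch of the masked counterpart, again with an illustrative function name:

#include <riscv_vector.h>
#include <stddef.h>

// Masked vslideup: the mask controls which destination elements may be
// written. Its IR intrinsic already carried a trailing policy operand (see
// the RVVSlideMasked comment above); after this patch the unmasked form
// takes the same extra operand.
vint32m1_t prepend_prefix_m(vbool32_t mask, vint32m1_t dst, vint32m1_t src,
                            size_t offset, size_t vl) {
  return vslideup_vx_i32m1_m(mask, dst, src, offset, vl);
}
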
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index dfcfa3cf9d571..9f3540401600e 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -2509,9 +2509,9 @@ multiclass VPseudoTernaryV_VV_AAXA<string Constraint = "",
   }
 }
 
-multiclass VPseudoTernaryV_VX<string Constraint = ""> {
+multiclass VPseudoVSLDV_VX<string Constraint = ""> {
   foreach m = MxList in
-    defm _VX : VPseudoTernaryNoMaskNoPolicy<m.vrclass, m.vrclass, GPR, m, Constraint>;
+    defm _VX : VPseudoTernaryWithPolicy<m.vrclass, m.vrclass, GPR, m, Constraint>;
 }
 
 multiclass VPseudoTernaryV_VX_AAXA<string Constraint = ""> {
@@ -2550,9 +2550,9 @@ multiclass VPseudoTernaryW_VF {
                                                   m.vrclass, m, constraint>;
 }
 
-multiclass VPseudoTernaryV_VI<Operand ImmType = simm5, string Constraint = ""> {
+multiclass VPseudoVSLDV_VI<Operand ImmType = simm5, string Constraint = ""> {
   foreach m = MxList in
-    defm _VI : VPseudoTernaryNoMaskNoPolicy<m.vrclass, m.vrclass, ImmType, m, Constraint>;
+    defm _VI : VPseudoTernaryWithPolicy<m.vrclass, m.vrclass, ImmType, m, Constraint>;
 }
 
 multiclass VPseudoVMAC_VV_VX_AAXA<string Constraint = ""> {
@@ -2570,9 +2570,9 @@ multiclass VPseudoVMAC_VV_VF_AAXA<string Constraint = ""> {
 }
 
 multiclass VPseudoVSLD_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
-  defm "" : VPseudoTernaryV_VX<Constraint>,
+  defm "" : VPseudoVSLDV_VX<Constraint>,
             Sched<[WriteVISlideX, ReadVISlideV, ReadVISlideV, ReadVISlideX, ReadVMask]>;
-  defm "" : VPseudoTernaryV_VI<ImmType, Constraint>,
+  defm "" : VPseudoVSLDV_VI<ImmType, Constraint>,
             Sched<[WriteVISlideI, ReadVISlideV, ReadVISlideV, ReadVMask]>;
 }
 
@@ -3970,10 +3970,10 @@ multiclass VPatTernaryV_VV_AAXA<string intrinsic, string instruction,
 multiclass VPatTernaryV_VX<string intrinsic, string instruction,
                            list<VTypeInfo> vtilist> {
   foreach vti = vtilist in
-    defm : VPatTernaryNoMaskNoPolicy<intrinsic, instruction, "VX",
-                                     vti.Vector, vti.Vector, XLenVT, vti.Mask,
-                                     vti.Log2SEW, vti.LMul, vti.RegClass,
-                                     vti.RegClass, GPR>;
+    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VX",
+                                 vti.Vector, vti.Vector, XLenVT, vti.Mask,
+                                 vti.Log2SEW, vti.LMul, vti.RegClass,
+                                 vti.RegClass, GPR>;
 }
 
 multiclass VPatTernaryV_VX_AAXA<string intrinsic, string instruction,
@@ -3989,10 +3989,10 @@ multiclass VPatTernaryV_VX_AAXA<string intrinsic, string instruction,
 multiclass VPatTernaryV_VI<string intrinsic, string instruction,
                            list<VTypeInfo> vtilist, Operand Imm_type> {
   foreach vti = vtilist in
-    defm : VPatTernaryNoMaskNoPolicy<intrinsic, instruction, "VI",
-                                     vti.Vector, vti.Vector, XLenVT, vti.Mask,
-                                     vti.Log2SEW, vti.LMul, vti.RegClass,
-                                     vti.RegClass, Imm_type>;
+    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VI",
+                                 vti.Vector, vti.Vector, XLenVT, vti.Mask,
+                                 vti.Log2SEW, vti.LMul, vti.RegClass,
+                                 vti.RegClass, Imm_type>;
 }
 
 multiclass VPatTernaryW_VV<string intrinsic, string instruction,
@@ -4030,6 +4030,7 @@ multiclass VPatTernaryV_VX_VI<string intrinsic, string instruction,
     : VPatTernaryV_VX<intrinsic, instruction, vtilist>,
       VPatTernaryV_VI<intrinsic, instruction, vtilist, Imm_type>;
 
+
 multiclass VPatBinaryM_VV_VX_VI<string intrinsic, string instruction,
                                 list<VTypeInfo> vtilist>
     : VPatBinaryM_VV<intrinsic, instruction, vtilist>,

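The multiclass changes above move the slide pseudos onto the policy-carrying ternary form, so each unmasked vslideup/vslidedown pseudo now takes an explicit tail-policy immediate while still reading its destination register. A minimal C sketch of the vslideup contract those pseudos model, assuming the pre-__riscv_ intrinsic spellings clang shipped at the time and a toolchain built with the V extension (the function name is invented for illustration):

#include <riscv_vector.h>

// Result elements [0, 2) are taken from 'dest' (vslideup leaves everything below
// the offset undisturbed); elements [2, vl) come from src[0 .. vl-3]. Only the
// tail at and beyond vl is governed by the policy immediate the pseudos now carry.
vint32m1_t slide_in_above_two(vint32m1_t dest, vint32m1_t src, size_t vl) {
  return vslideup_vx_i32m1(dest, src, 2, vl);
}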
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index b097816864107..1c40413a3c939 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -1907,7 +1907,7 @@ foreach vti = !listconcat(AllIntegerVectors, AllFloatVectors) in {
                                           VLOpFrag)),
             (!cast<Instruction>("PseudoVSLIDEUP_VI_"#vti.LMul.MX)
                 vti.RegClass:$rs3, vti.RegClass:$rs1, uimm5:$rs2,
-                GPR:$vl, vti.Log2SEW)>;
+                GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
 
   def : Pat<(vti.Vector (riscv_slideup_vl (vti.Vector vti.RegClass:$rs3),
                                           (vti.Vector vti.RegClass:$rs1),
@@ -1915,7 +1915,7 @@ foreach vti = !listconcat(AllIntegerVectors, AllFloatVectors) in {
                                           VLOpFrag)),
             (!cast<Instruction>("PseudoVSLIDEUP_VX_"#vti.LMul.MX)
                 vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2,
-                GPR:$vl, vti.Log2SEW)>;
+                GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
 
   def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector vti.RegClass:$rs3),
                                             (vti.Vector vti.RegClass:$rs1),
@@ -1923,7 +1923,7 @@ foreach vti = !listconcat(AllIntegerVectors, AllFloatVectors) in {
                                             VLOpFrag)),
             (!cast<Instruction>("PseudoVSLIDEDOWN_VI_"#vti.LMul.MX)
                 vti.RegClass:$rs3, vti.RegClass:$rs1, uimm5:$rs2,
-                GPR:$vl, vti.Log2SEW)>;
+                GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
 
   def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector vti.RegClass:$rs3),
                                             (vti.Vector vti.RegClass:$rs1),
@@ -1931,7 +1931,7 @@ foreach vti = !listconcat(AllIntegerVectors, AllFloatVectors) in {
                                             VLOpFrag)),
             (!cast<Instruction>("PseudoVSLIDEDOWN_VX_"#vti.LMul.MX)
                 vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2,
-                GPR:$vl, vti.Log2SEW)>;
+                GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
 }
 
 } // Predicates = [HasVInstructions]

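These VL patterns now pass TAIL_AGNOSTIC on the slide pseudos, which lines up with the vsetvli/vsetivli flips from tu to ta running through the test updates below. For unmasked vslidedown the extra operand mainly keeps its pseudo shaped like vslideup's; a sketch under the same naming assumptions (drop_front_n is an invented name):

#include <riscv_vector.h>

// result[i] = src[i + n] for i < vl (elements sourced from past VLMAX read as
// zero). The dest operand only shows through for body elements below vstart or,
// under a tail-undisturbed policy, for the tail at and beyond vl.
vuint8m1_t drop_front_n(vuint8m1_t dest, vuint8m1_t src, size_t n, size_t vl) {
  return vslidedown_vx_u8m1(dest, src, n, vl);
}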
diff --git a/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir b/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir
index 25afacf72b40b..a23c1d05ca975 100644
--- a/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir
@@ -141,7 +141,7 @@ body:             |
   ; CHECK-NEXT:   $x5 = LD $x2, 0 :: (load (s64) from %stack.16)
   ; CHECK-NEXT:   renamable $v0 = PseudoVRELOAD_M1 killed $x1 :: (load unknown-size from %stack.1, align 8)
   ; CHECK-NEXT:   $x1 = LD $x2, 8 :: (load (s64) from %stack.15)
-  ; CHECK-NEXT:   renamable $v0 = PseudoVSLIDEDOWN_VX_M1 undef renamable $v0, killed renamable $v0, killed renamable $x13, $noreg, 3 /* e8 */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   renamable $v0 = PseudoVSLIDEDOWN_VX_M1 undef renamable $v0, killed renamable $v0, killed renamable $x13, $noreg, 3 /* e8 */, 1, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   renamable $x13 = PseudoVMV_X_S_M1 killed renamable $v0, 3 /* e8 */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   BLT killed renamable $x16, renamable $x27, %bb.2
   ; CHECK-NEXT: {{  $}}
@@ -209,7 +209,7 @@ body:             |
     dead renamable $x13 = PseudoVSETIVLI 1, 64, implicit-def $vl, implicit-def $vtype
     renamable $x13 = nsw ADDI renamable $x16, -2
     renamable $v0 = PseudoVRELOAD_M1 %stack.1 :: (load unknown-size from %stack.1, align 8)
-    renamable $v0 = PseudoVSLIDEDOWN_VX_M1 undef renamable $v0, killed renamable $v0, killed renamable $x13, $noreg, 3, implicit $vl, implicit $vtype
+    renamable $v0 = PseudoVSLIDEDOWN_VX_M1 undef renamable $v0, killed renamable $v0, killed renamable $x13, $noreg, 3, 1, implicit $vl, implicit $vtype
     renamable $x13 = PseudoVMV_X_S_M1 killed renamable $v0, 3, implicit $vl, implicit $vtype
     BLT killed renamable $x16, renamable $x27, %bb.2
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
index bc1b226aa0bea..ef3d7d54753cf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
@@ -482,10 +482,10 @@ define <vscale x 6 x half> @extract_nxv6f16_nxv12f16_6(<vscale x 12 x half> %in)
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v14, v10, a0
 ; CHECK-NEXT:    vslidedown.vx v12, v9, a0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v13, v14, 0
 ; CHECK-NEXT:    add a1, a0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v12, v10, a0
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
index 2f8d04ba1e8d4..2804ea1886aae 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
@@ -1451,7 +1451,7 @@ define <4 x i1> @pass_vector_mask_arg_via_stack(<4 x i1> %v) {
 ; LMULMAX8-NEXT:    vmerge.vim v16, v16, 1, v0
 ; LMULMAX8-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX8-NEXT:    vmv.v.i v17, 0
-; LMULMAX8-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; LMULMAX8-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; LMULMAX8-NEXT:    vslideup.vi v17, v16, 0
 ; LMULMAX8-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX8-NEXT:    vmsne.vi v16, v17, 0
@@ -1489,7 +1489,7 @@ define <4 x i1> @pass_vector_mask_arg_via_stack(<4 x i1> %v) {
 ; LMULMAX4-NEXT:    vmerge.vim v12, v12, 1, v0
 ; LMULMAX4-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX4-NEXT:    vmv.v.i v13, 0
-; LMULMAX4-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; LMULMAX4-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; LMULMAX4-NEXT:    vslideup.vi v13, v12, 0
 ; LMULMAX4-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX4-NEXT:    vmsne.vi v12, v13, 0
@@ -1533,7 +1533,7 @@ define <4 x i1> @pass_vector_mask_arg_via_stack(<4 x i1> %v) {
 ; LMULMAX2-NEXT:    vmerge.vim v10, v10, 1, v0
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX2-NEXT:    vmv.v.i v11, 0
-; LMULMAX2-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; LMULMAX2-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; LMULMAX2-NEXT:    vslideup.vi v11, v10, 0
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX2-NEXT:    vmsne.vi v10, v11, 0
@@ -1589,7 +1589,7 @@ define <4 x i1> @pass_vector_mask_arg_via_stack(<4 x i1> %v) {
 ; LMULMAX1-NEXT:    vmerge.vim v9, v9, 1, v0
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vmv.v.i v10, 0
-; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v9, 0
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vmsne.vi v9, v10, 0

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll
index f2fd6e1d0d3a9..4d05697cdd324 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll
@@ -558,7 +558,7 @@ define void @truncstore_v2i8_v2i1(<2 x i8> %x, <2 x i1>* %z) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -849,7 +849,7 @@ define void @truncstore_v16i16_v16i8(<16 x i16> %x, <16 x i8>* %z) {
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT:    vsetivli zero, 16, e8, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 8
 ; LMULMAX1-NEXT:    vse8.v v8, (a0)
 ; LMULMAX1-NEXT:    ret
@@ -1164,7 +1164,7 @@ define void @truncstore_v8i32_v8i8(<8 x i32> %x, <8 x i8>* %z) {
 ; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 4
 ; LMULMAX1-NEXT:    vse8.v v8, (a0)
 ; LMULMAX1-NEXT:    ret
@@ -1188,7 +1188,7 @@ define void @truncstore_v8i32_v8i16(<8 x i32> %x, <8 x i16>* %z) {
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 4
 ; LMULMAX1-NEXT:    vse16.v v8, (a0)
 ; LMULMAX1-NEXT:    ret
@@ -1277,19 +1277,19 @@ define void @truncstore_v16i32_v16i8(<16 x i32> %x, <16 x i8>* %z) {
 ; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT:    vsetivli zero, 8, e8, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 8, e8, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v10
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT:    vsetivli zero, 12, e8, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 12, e8, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 8
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v11
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT:    vsetivli zero, 16, e8, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 12
 ; LMULMAX1-NEXT:    vse8.v v8, (a0)
 ; LMULMAX1-NEXT:    ret
@@ -1313,12 +1313,12 @@ define void @truncstore_v16i32_v16i16(<16 x i32> %x, <16 x i16>* %z) {
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v11
 ; LMULMAX1-NEXT:    vncvt.x.x.w v10, v10
-; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v9, 4
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse16.v v10, (a1)
@@ -1489,7 +1489,7 @@ define void @truncstore_v4i64_v4i8(<4 x i64> %x, <4 x i8>* %z) {
 ; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf4, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 2
 ; LMULMAX1-NEXT:    vse8.v v8, (a0)
 ; LMULMAX1-NEXT:    ret
@@ -1520,7 +1520,7 @@ define void @truncstore_v4i64_v4i16(<4 x i64> %x, <4 x i16>* %z) {
 ; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 2
 ; LMULMAX1-NEXT:    vse16.v v8, (a0)
 ; LMULMAX1-NEXT:    ret
@@ -1544,7 +1544,7 @@ define void @truncstore_v4i64_v4i32(<4 x i64> %x, <4 x i32>* %z) {
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 2
 ; LMULMAX1-NEXT:    vse32.v v8, (a0)
 ; LMULMAX1-NEXT:    ret
@@ -1575,7 +1575,7 @@ define void @truncstore_v8i64_v8i8(<8 x i64> %x, <8 x i8>* %z) {
 ; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v10
@@ -1583,7 +1583,7 @@ define void @truncstore_v8i64_v8i8(<8 x i64> %x, <8 x i8>* %z) {
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT:    vsetivli zero, 6, e8, mf2, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 6, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v11
@@ -1591,7 +1591,7 @@ define void @truncstore_v8i64_v8i8(<8 x i64> %x, <8 x i8>* %z) {
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 6
 ; LMULMAX1-NEXT:    vse8.v v8, (a0)
 ; LMULMAX1-NEXT:    ret
@@ -1622,19 +1622,19 @@ define void @truncstore_v8i64_v8i16(<8 x i64> %x, <8 x i16>* %z) {
 ; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 4, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v10
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT:    vsetivli zero, 6, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 6, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v11
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 6
 ; LMULMAX1-NEXT:    vse16.v v8, (a0)
 ; LMULMAX1-NEXT:    ret
@@ -1658,12 +1658,12 @@ define void @truncstore_v8i64_v8i32(<8 x i64> %x, <8 x i32>* %z) {
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v11
 ; LMULMAX1-NEXT:    vncvt.x.x.w v10, v10
-; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v9, 2
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse32.v v10, (a1)
@@ -1696,7 +1696,7 @@ define void @truncstore_v16i64_v16i8(<16 x i64> %x, <16 x i8>* %z) {
 ; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT:    vsetivli zero, 4, e8, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 4, e8, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v10
@@ -1704,7 +1704,7 @@ define void @truncstore_v16i64_v16i8(<16 x i64> %x, <16 x i8>* %z) {
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT:    vsetivli zero, 6, e8, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 6, e8, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v11
@@ -1712,7 +1712,7 @@ define void @truncstore_v16i64_v16i8(<16 x i64> %x, <16 x i8>* %z) {
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT:    vsetivli zero, 8, e8, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 8, e8, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 6
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v12
@@ -1720,7 +1720,7 @@ define void @truncstore_v16i64_v16i8(<16 x i64> %x, <16 x i8>* %z) {
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT:    vsetivli zero, 10, e8, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 10, e8, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 8
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v13
@@ -1728,7 +1728,7 @@ define void @truncstore_v16i64_v16i8(<16 x i64> %x, <16 x i8>* %z) {
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT:    vsetivli zero, 12, e8, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 12, e8, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 10
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v14
@@ -1736,7 +1736,7 @@ define void @truncstore_v16i64_v16i8(<16 x i64> %x, <16 x i8>* %z) {
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT:    vsetivli zero, 14, e8, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 14, e8, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 12
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v15
@@ -1744,7 +1744,7 @@ define void @truncstore_v16i64_v16i8(<16 x i64> %x, <16 x i8>* %z) {
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT:    vsetivli zero, 16, e8, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 14
 ; LMULMAX1-NEXT:    vse8.v v8, (a0)
 ; LMULMAX1-NEXT:    ret
@@ -1763,7 +1763,7 @@ define void @truncstore_v16i64_v16i8(<16 x i64> %x, <16 x i8>* %z) {
 ; LMULMAX4-NEXT:    vncvt.x.x.w v8, v14
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; LMULMAX4-NEXT:    vncvt.x.x.w v8, v8
-; LMULMAX4-NEXT:    vsetivli zero, 16, e8, m1, tu, mu
+; LMULMAX4-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; LMULMAX4-NEXT:    vslideup.vi v8, v12, 8
 ; LMULMAX4-NEXT:    vse8.v v8, (a0)
 ; LMULMAX4-NEXT:    ret
@@ -1783,19 +1783,19 @@ define void @truncstore_v16i64_v16i16(<16 x i64> %x, <16 x i16>* %z) {
 ; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 4, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v10
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT:    vsetivli zero, 6, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 6, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v11
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 6
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v13
@@ -1805,19 +1805,19 @@ define void @truncstore_v16i64_v16i16(<16 x i64> %x, <16 x i16>* %z) {
 ; LMULMAX1-NEXT:    vncvt.x.x.w v10, v12
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v10, v10
-; LMULMAX1-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 4, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v9, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v14
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT:    vsetivli zero, 6, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 6, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v9, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v15
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v9, 6
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse16.v v10, (a1)
@@ -1834,7 +1834,7 @@ define void @truncstore_v16i64_v16i16(<16 x i64> %x, <16 x i16>* %z) {
 ; LMULMAX4-NEXT:    vncvt.x.x.w v14, v8
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; LMULMAX4-NEXT:    vncvt.x.x.w v8, v14
-; LMULMAX4-NEXT:    vsetivli zero, 16, e16, m2, tu, mu
+; LMULMAX4-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; LMULMAX4-NEXT:    vslideup.vi v8, v12, 8
 ; LMULMAX4-NEXT:    vse16.v v8, (a0)
 ; LMULMAX4-NEXT:    ret
@@ -1849,22 +1849,22 @@ define void @truncstore_v16i64_v16i32(<16 x i64> %x, <16 x i32>* %z) {
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v11
 ; LMULMAX1-NEXT:    vncvt.x.x.w v10, v10
-; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v9, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v13
 ; LMULMAX1-NEXT:    vncvt.x.x.w v11, v12
-; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v11, v9, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v15
 ; LMULMAX1-NEXT:    vncvt.x.x.w v12, v14
-; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v12, v9, 2
 ; LMULMAX1-NEXT:    addi a1, a0, 48
 ; LMULMAX1-NEXT:    vse32.v v12, (a1)
@@ -1880,7 +1880,7 @@ define void @truncstore_v16i64_v16i32(<16 x i64> %x, <16 x i32>* %z) {
 ; LMULMAX4-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX4-NEXT:    vncvt.x.x.w v16, v12
 ; LMULMAX4-NEXT:    vncvt.x.x.w v12, v8
-; LMULMAX4-NEXT:    vsetivli zero, 16, e32, m4, tu, mu
+; LMULMAX4-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; LMULMAX4-NEXT:    vslideup.vi v12, v16, 8
 ; LMULMAX4-NEXT:    vse32.v v12, (a0)
 ; LMULMAX4-NEXT:    ret

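Most of the truncstore diffs above share one shape: narrow each piece, then concatenate with a vslideup whose vl spans the combined length, so the low part stays in place and the new part lands above it. A rough intrinsics equivalent, assuming VLEN >= 128 so sixteen bytes fit in one m1 register and using the same illustrative intrinsic spellings as the earlier snippet:

#include <riscv_vector.h>

// Keep lo[0..7] (vslideup never touches elements below the offset) and place
// hi[0..7] into result elements [8, 16).
vint8m1_t concat_halves(vint8m1_t lo, vint8m1_t hi) {
  return vslideup_vx_i8m1(lo, hi, 8, 16);
}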
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
index 7eb5bd53df45c..c2ed5f0cb8238 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
@@ -346,7 +346,7 @@ define void @extract_v2i1_v64i1_0(<64 x i1>* %x, <2 x i1>* %y) {
 ; LMULMAX2-NEXT:    vmerge.vim v8, v8, 1, v0
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX2-NEXT:    vmv.v.i v9, 0
-; LMULMAX2-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; LMULMAX2-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; LMULMAX2-NEXT:    vslideup.vi v9, v8, 0
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX2-NEXT:    vmsne.vi v8, v9, 0
@@ -362,7 +362,7 @@ define void @extract_v2i1_v64i1_0(<64 x i1>* %x, <2 x i1>* %y) {
 ; LMULMAX1-NEXT:    vmerge.vim v8, v8, 1, v0
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vmv.v.i v9, 0
-; LMULMAX1-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v9, v8, 0
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vmsne.vi v8, v9, 0
@@ -390,7 +390,7 @@ define void @extract_v2i1_v64i1_2(<64 x i1>* %x, <2 x i1>* %y) {
 ; LMULMAX2-NEXT:    vmerge.vim v8, v8, 1, v0
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX2-NEXT:    vmv.v.i v9, 0
-; LMULMAX2-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; LMULMAX2-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; LMULMAX2-NEXT:    vslideup.vi v9, v8, 0
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX2-NEXT:    vmsne.vi v8, v9, 0
@@ -411,7 +411,7 @@ define void @extract_v2i1_v64i1_2(<64 x i1>* %x, <2 x i1>* %y) {
 ; LMULMAX1-NEXT:    vmerge.vim v8, v8, 1, v0
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vmv.v.i v9, 0
-; LMULMAX1-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v9, v8, 0
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vmsne.vi v8, v9, 0
@@ -440,7 +440,7 @@ define void @extract_v2i1_v64i1_42(<64 x i1>* %x, <2 x i1>* %y) {
 ; LMULMAX2-NEXT:    vmerge.vim v8, v8, 1, v0
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX2-NEXT:    vmv.v.i v9, 0
-; LMULMAX2-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; LMULMAX2-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; LMULMAX2-NEXT:    vslideup.vi v9, v8, 0
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX2-NEXT:    vmsne.vi v8, v9, 0
@@ -462,7 +462,7 @@ define void @extract_v2i1_v64i1_42(<64 x i1>* %x, <2 x i1>* %y) {
 ; LMULMAX1-NEXT:    vmerge.vim v8, v8, 1, v0
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vmv.v.i v9, 0
-; LMULMAX1-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v9, v8, 0
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vmsne.vi v8, v9, 0
@@ -482,7 +482,7 @@ define void @extract_v2i1_nxv2i1_0(<vscale x 2 x i1> %x, <2 x i1>* %y) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -507,7 +507,7 @@ define void @extract_v2i1_nxv2i1_2(<vscale x 2 x i1> %x, <2 x i1>* %y) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -526,7 +526,7 @@ define void @extract_v2i1_nxv64i1_0(<vscale x 64 x i1> %x, <2 x i1>* %y) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -551,7 +551,7 @@ define void @extract_v2i1_nxv64i1_2(<vscale x 64 x i1> %x, <2 x i1>* %y) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -577,7 +577,7 @@ define void @extract_v2i1_nxv64i1_42(<vscale x 64 x i1> %x, <2 x i1>* %y) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -602,7 +602,7 @@ define void @extract_v2i1_nxv32i1_26(<vscale x 32 x i1> %x, <2 x i1>* %y) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
index 2ca0850cdde36..e9957e6faf74c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
@@ -117,7 +117,7 @@ define void @buildvec_dominant0_v4f32(<4 x float>* %x) {
 ; CHECK-NEXT:    addi a1, a1, %lo(.LCPI4_0)
 ; CHECK-NEXT:    vlse32.v v8, (a1), zero
 ; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetivli zero, 3, e32, m1, tu, mu
+; CHECK-NEXT:    vsetivli zero, 3, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 2
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
@@ -132,7 +132,7 @@ define void @buildvec_dominant1_v4f32(<4 x float>* %x, float %f) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.s.x v8, zero
 ; CHECK-NEXT:    vfmv.v.f v9, fa0
-; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, mu
+; CHECK-NEXT:    vsetivli zero, 2, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vse32.v v9, (a0)
@@ -153,7 +153,7 @@ define void @buildvec_dominant2_v4f32(<4 x float>* %x, float %f) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v8, ft0
 ; CHECK-NEXT:    vfmv.v.f v9, fa0
-; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, mu
+; CHECK-NEXT:    vsetivli zero, 2, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vse32.v v9, (a0)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll
index faf403816b02e..bae8a627d61dd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll
@@ -164,7 +164,7 @@ define void @fpround_v8f32_v8f16(<8 x float>* %x, <8 x half>* %y) {
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v10, v8
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v8, v9
-; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v8, 4
 ; LMULMAX1-NEXT:    vse16.v v10, (a1)
 ; LMULMAX1-NEXT:    ret
@@ -204,19 +204,19 @@ define void @fpround_v8f64_v8f16(<8 x double>* %x, <8 x half>* %y) {
 ; LMULMAX1-NEXT:    vfncvt.rod.f.f.w v12, v11
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v11, v12
-; LMULMAX1-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 4, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v11, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rod.f.f.w v11, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v9, v11
-; LMULMAX1-NEXT:    vsetivli zero, 6, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 6, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v9, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rod.f.f.w v9, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v8, v9
-; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v8, 6
 ; LMULMAX1-NEXT:    vse16.v v10, (a1)
 ; LMULMAX1-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
index 12f4bcb554cdb..fd9a076baa593 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
@@ -46,7 +46,7 @@ define void @fcmp_une_vv_v4f32(<4 x float>* %x, <4 x float>* %y, <4 x i1>* %z) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -71,7 +71,7 @@ define void @fcmp_une_vv_v4f32_nonans(<4 x float>* %x, <4 x float>* %y, <4 x i1>
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -96,7 +96,7 @@ define void @fcmp_ogt_vv_v2f64(<2 x double>* %x, <2 x double>* %y, <2 x i1>* %z)
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -121,7 +121,7 @@ define void @fcmp_ogt_vv_v2f64_nonans(<2 x double>* %x, <2 x double>* %y, <2 x i
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -210,7 +210,7 @@ define void @fcmp_ole_vv_v4f64(<4 x double>* %x, <4 x double>* %y, <4 x i1>* %z)
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -235,7 +235,7 @@ define void @fcmp_ole_vv_v4f64_nonans(<4 x double>* %x, <4 x double>* %y, <4 x i
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -468,7 +468,7 @@ define void @fcmp_ord_vv_v4f16(<4 x half>* %x, <4 x half>* %y, <4 x i1>* %z) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -495,7 +495,7 @@ define void @fcmp_uno_vv_v4f16(<2 x half>* %x, <2 x half>* %y, <2 x i1>* %z) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -551,7 +551,7 @@ define void @fcmp_une_vf_v4f32(<4 x float>* %x, float %y, <4 x i1>* %z) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -576,7 +576,7 @@ define void @fcmp_une_vf_v4f32_nonans(<4 x float>* %x, float %y, <4 x i1>* %z) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -601,7 +601,7 @@ define void @fcmp_ogt_vf_v2f64(<2 x double>* %x, double %y, <2 x i1>* %z) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -626,7 +626,7 @@ define void @fcmp_ogt_vf_v2f64_nonans(<2 x double>* %x, double %y, <2 x i1>* %z)
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -715,7 +715,7 @@ define void @fcmp_ole_vf_v4f64(<4 x double>* %x, double %y, <4 x i1>* %z) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -740,7 +740,7 @@ define void @fcmp_ole_vf_v4f64_nonans(<4 x double>* %x, double %y, <4 x i1>* %z)
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -974,7 +974,7 @@ define void @fcmp_ord_vf_v4f16(<4 x half>* %x, half %y, <4 x i1>* %z) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -1002,7 +1002,7 @@ define void @fcmp_uno_vf_v4f16(<2 x half>* %x, half %y, <2 x i1>* %z) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -1059,7 +1059,7 @@ define void @fcmp_une_fv_v4f32(<4 x float>* %x, float %y, <4 x i1>* %z) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -1084,7 +1084,7 @@ define void @fcmp_une_fv_v4f32_nonans(<4 x float>* %x, float %y, <4 x i1>* %z) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -1109,7 +1109,7 @@ define void @fcmp_ogt_fv_v2f64(<2 x double>* %x, double %y, <2 x i1>* %z) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -1134,7 +1134,7 @@ define void @fcmp_ogt_fv_v2f64_nonans(<2 x double>* %x, double %y, <2 x i1>* %z)
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -1223,7 +1223,7 @@ define void @fcmp_ole_fv_v4f64(<4 x double>* %x, double %y, <4 x i1>* %z) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -1248,7 +1248,7 @@ define void @fcmp_ole_fv_v4f64_nonans(<4 x double>* %x, double %y, <4 x i1>* %z)
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -1482,7 +1482,7 @@ define void @fcmp_ord_fv_v4f16(<4 x half>* %x, half %y, <4 x i1>* %z) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -1510,7 +1510,7 @@ define void @fcmp_uno_fv_v4f16(<2 x half>* %x, half %y, <2 x i1>* %z) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
index 1ffce0725b346..b8a6bdc49fe52 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
@@ -283,9 +283,9 @@ define <8 x float> @splice_unary(<8 x float> %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 7, e32, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v10, v8, 1
-; CHECK-NEXT:    vsetivli zero, 8, e32, m2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v10, v8, 7
-; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
   %s = shufflevector <8 x float> %x, <8 x float> poison, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0>
   ret <8 x float> %s
@@ -296,9 +296,9 @@ define <8 x double> @splice_unary2(<8 x double> %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v12, v8, 6
-; CHECK-NEXT:    vsetivli zero, 8, e64, m4, tu, mu
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v12, v8, 2
-; CHECK-NEXT:    vmv4r.v v8, v12
+; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
   %s = shufflevector <8 x double> %x, <8 x double> poison, <8 x i32> <i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5>
   ret <8 x double> %s
@@ -309,7 +309,7 @@ define <8 x float> @splice_binary(<8 x float> %x, <8 x float> %y) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 6, e32, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vsetivli zero, 8, e32, m2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 6
 ; CHECK-NEXT:    ret
   %s = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 9>
@@ -321,9 +321,9 @@ define <8 x double> @splice_binary2(<8 x double> %x, <8 x double> %y) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 3, e64, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v12, v12, 5
-; CHECK-NEXT:    vsetivli zero, 8, e64, m4, tu, mu
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v12, v8, 3
-; CHECK-NEXT:    vmv4r.v v8, v12
+; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
   %s = shufflevector <8 x double> %x, <8 x double> %y, <8 x i32> <i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4>
   ret <8 x double> %s

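The splice shuffles above compile to a slidedown/slideup pair. A sketch of what that pair computes for the unary case, under the same assumptions as the earlier snippets (VLEN >= 128 so eight f32 elements fit in m2, illustrative intrinsic spellings):

#include <riscv_vector.h>

// Rotate an 8-element vector left by one: lo holds x[1..7] in positions 0..6,
// then the slideup keeps positions 0..6 from lo and writes x[0] into position 7.
vfloat32m2_t rotate_left_one(vfloat32m2_t x) {
  vfloat32m2_t lo = vslidedown_vx_f32m2(x, x, 1, 7);
  return vslideup_vx_f32m2(lo, x, 7, 8);
}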
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
index 566fcdbc2ca9a..f022c500e49a0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
@@ -135,9 +135,8 @@ define <8 x i1> @fp2si_v8f32_v8i1(<8 x float> %x) {
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; LMULMAX1-NEXT:    vmv.v.i v9, 0
 ; LMULMAX1-NEXT:    vmerge.vim v9, v9, 1, v0
-; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 4
-; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vmsne.vi v0, v8, 0
 ; LMULMAX1-NEXT:    ret
   %z = fptosi <8 x float> %x to <8 x i1>
@@ -169,9 +168,8 @@ define <8 x i1> @fp2ui_v8f32_v8i1(<8 x float> %x) {
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; LMULMAX1-NEXT:    vmv.v.i v9, 0
 ; LMULMAX1-NEXT:    vmerge.vim v9, v9, 1, v0
-; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 4
-; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vmsne.vi v0, v8, 0
 ; LMULMAX1-NEXT:    ret
   %z = fptoui <8 x float> %x to <8 x i1>
@@ -438,7 +436,7 @@ define void @fp2si_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; LMULMAX1-NEXT:    vncvt.x.x.w v11, v12
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v11, v11
-; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v11, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rtz.x.f.w v11, v9
@@ -446,7 +444,7 @@ define void @fp2si_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v11
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT:    vsetivli zero, 6, e8, mf2, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 6, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v9, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rtz.x.f.w v9, v8
@@ -454,7 +452,7 @@ define void @fp2si_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; LMULMAX1-NEXT:    vncvt.x.x.w v8, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v8, 6
 ; LMULMAX1-NEXT:    vse8.v v10, (a1)
 ; LMULMAX1-NEXT:    ret
@@ -500,7 +498,7 @@ define void @fp2ui_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; LMULMAX1-NEXT:    vncvt.x.x.w v11, v12
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v11, v11
-; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v11, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rtz.xu.f.w v11, v9
@@ -508,7 +506,7 @@ define void @fp2ui_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v11
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT:    vsetivli zero, 6, e8, mf2, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 6, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v9, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rtz.xu.f.w v9, v8
@@ -516,7 +514,7 @@ define void @fp2ui_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; LMULMAX1-NEXT:    vncvt.x.x.w v8, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v8, 6
 ; LMULMAX1-NEXT:    vse8.v v10, (a1)
 ; LMULMAX1-NEXT:    ret
@@ -551,7 +549,7 @@ define <8 x i1> @fp2si_v8f64_v8i1(<8 x double> %x) {
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; LMULMAX1-NEXT:    vmv.v.i v9, 0
 ; LMULMAX1-NEXT:    vmerge.vim v13, v9, 1, v0
-; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v12, v13, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vmsne.vi v0, v12, 0
@@ -562,7 +560,7 @@ define <8 x i1> @fp2si_v8f64_v8i1(<8 x double> %x) {
 ; LMULMAX1-NEXT:    vmsne.vi v0, v10, 0
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; LMULMAX1-NEXT:    vmerge.vim v10, v9, 1, v0
-; LMULMAX1-NEXT:    vsetivli zero, 6, e8, mf2, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 6, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v12, v10, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vmsne.vi v0, v12, 0
@@ -573,9 +571,8 @@ define <8 x i1> @fp2si_v8f64_v8i1(<8 x double> %x) {
 ; LMULMAX1-NEXT:    vmsne.vi v0, v10, 0
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; LMULMAX1-NEXT:    vmerge.vim v9, v9, 1, v0
-; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 6
-; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vmsne.vi v0, v8, 0
 ; LMULMAX1-NEXT:    ret
   %z = fptosi <8 x double> %x to <8 x i1>
@@ -607,7 +604,7 @@ define <8 x i1> @fp2ui_v8f64_v8i1(<8 x double> %x) {
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; LMULMAX1-NEXT:    vmv.v.i v9, 0
 ; LMULMAX1-NEXT:    vmerge.vim v13, v9, 1, v0
-; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v12, v13, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vmsne.vi v0, v12, 0
@@ -618,7 +615,7 @@ define <8 x i1> @fp2ui_v8f64_v8i1(<8 x double> %x) {
 ; LMULMAX1-NEXT:    vmsne.vi v0, v10, 0
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; LMULMAX1-NEXT:    vmerge.vim v10, v9, 1, v0
-; LMULMAX1-NEXT:    vsetivli zero, 6, e8, mf2, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 6, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v12, v10, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vmsne.vi v0, v12, 0
@@ -629,9 +626,8 @@ define <8 x i1> @fp2ui_v8f64_v8i1(<8 x double> %x) {
 ; LMULMAX1-NEXT:    vmsne.vi v0, v10, 0
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; LMULMAX1-NEXT:    vmerge.vim v9, v9, 1, v0
-; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 6
-; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vmsne.vi v0, v8, 0
 ; LMULMAX1-NEXT:    ret
   %z = fptoui <8 x double> %x to <8 x i1>

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
index f932bdfd2817e..425ed99e44842 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
@@ -482,19 +482,19 @@ define void @si2fp_v8i64_v8f16(<8 x i64>* %x, <8 x half>* %y) {
 ; LMULMAX1-NEXT:    vfncvt.f.x.w v12, v11
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v11, v12
-; LMULMAX1-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 4, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v11, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.x.w v11, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v9, v11
-; LMULMAX1-NEXT:    vsetivli zero, 6, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 6, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v9, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.x.w v9, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v8, v9
-; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v8, 6
 ; LMULMAX1-NEXT:    vse16.v v10, (a1)
 ; LMULMAX1-NEXT:    ret
@@ -534,19 +534,19 @@ define void @ui2fp_v8i64_v8f16(<8 x i64>* %x, <8 x half>* %y) {
 ; LMULMAX1-NEXT:    vfncvt.f.xu.w v12, v11
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v11, v12
-; LMULMAX1-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 4, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v11, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.xu.w v11, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v9, v11
-; LMULMAX1-NEXT:    vsetivli zero, 6, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 6, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v9, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.xu.w v9, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v8, v9
-; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v8, 6
 ; LMULMAX1-NEXT:    vse16.v v10, (a1)
 ; LMULMAX1-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll
index 4bc46c60a87f6..44af762e51af9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll
@@ -28,7 +28,7 @@ define <1 x i1> @insertelt_idx_v1i1(<1 x i1> %x, i1 %elt, i32 zeroext %idx) noun
 ; RV32-NEXT:    vmv.v.i v9, 0
 ; RV32-NEXT:    vmerge.vim v9, v9, 1, v0
 ; RV32-NEXT:    addi a0, a1, 1
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; RV32-NEXT:    vslideup.vx v9, v8, a1
 ; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; RV32-NEXT:    vand.vi v8, v9, 1
@@ -43,7 +43,7 @@ define <1 x i1> @insertelt_idx_v1i1(<1 x i1> %x, i1 %elt, i32 zeroext %idx) noun
 ; RV64-NEXT:    vmerge.vim v9, v9, 1, v0
 ; RV64-NEXT:    sext.w a0, a1
 ; RV64-NEXT:    addi a1, a0, 1
-; RV64-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
+; RV64-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; RV64-NEXT:    vslideup.vx v9, v8, a0
 ; RV64-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; RV64-NEXT:    vand.vi v8, v9, 1
@@ -60,9 +60,7 @@ define <2 x i1> @insertelt_v2i1(<2 x i1> %x, i1 %elt) nounwind {
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vmv.v.i v9, 0
 ; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, tu, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 1
-; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vand.vi v8, v9, 1
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
@@ -78,7 +76,7 @@ define <2 x i1> @insertelt_idx_v2i1(<2 x i1> %x, i1 %elt, i32 zeroext %idx) noun
 ; RV32-NEXT:    vmv.v.i v9, 0
 ; RV32-NEXT:    vmerge.vim v9, v9, 1, v0
 ; RV32-NEXT:    addi a0, a1, 1
-; RV32-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; RV32-NEXT:    vslideup.vx v9, v8, a1
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
 ; RV32-NEXT:    vand.vi v8, v9, 1
@@ -93,7 +91,7 @@ define <2 x i1> @insertelt_idx_v2i1(<2 x i1> %x, i1 %elt, i32 zeroext %idx) noun
 ; RV64-NEXT:    vmerge.vim v9, v9, 1, v0
 ; RV64-NEXT:    sext.w a0, a1
 ; RV64-NEXT:    addi a1, a0, 1
-; RV64-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
+; RV64-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; RV64-NEXT:    vslideup.vx v9, v8, a0
 ; RV64-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
 ; RV64-NEXT:    vand.vi v8, v9, 1
@@ -110,7 +108,7 @@ define <8 x i1> @insertelt_v8i1(<8 x i1> %x, i1 %elt) nounwind {
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vmv.v.i v9, 0
 ; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vand.vi v8, v9, 1
@@ -128,7 +126,7 @@ define <8 x i1> @insertelt_idx_v8i1(<8 x i1> %x, i1 %elt, i32 zeroext %idx) noun
 ; RV32-NEXT:    vmv.v.i v9, 0
 ; RV32-NEXT:    vmerge.vim v9, v9, 1, v0
 ; RV32-NEXT:    addi a0, a1, 1
-; RV32-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
+; RV32-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; RV32-NEXT:    vslideup.vx v9, v8, a1
 ; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV32-NEXT:    vand.vi v8, v9, 1
@@ -143,7 +141,7 @@ define <8 x i1> @insertelt_idx_v8i1(<8 x i1> %x, i1 %elt, i32 zeroext %idx) noun
 ; RV64-NEXT:    vmerge.vim v9, v9, 1, v0
 ; RV64-NEXT:    sext.w a0, a1
 ; RV64-NEXT:    addi a1, a0, 1
-; RV64-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
+; RV64-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; RV64-NEXT:    vslideup.vx v9, v8, a0
 ; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV64-NEXT:    vand.vi v8, v9, 1
@@ -161,7 +159,7 @@ define <64 x i1> @insertelt_v64i1(<64 x i1> %x, i1 %elt) nounwind {
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vmv.v.i v12, 0
 ; CHECK-NEXT:    vmerge.vim v12, v12, 1, v0
-; CHECK-NEXT:    vsetivli zero, 2, e8, m4, tu, mu
+; CHECK-NEXT:    vsetivli zero, 2, e8, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v12, v8, 1
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vand.vi v8, v12, 1
@@ -180,7 +178,7 @@ define <64 x i1> @insertelt_idx_v64i1(<64 x i1> %x, i1 %elt, i32 zeroext %idx) n
 ; RV32-NEXT:    vmv.v.i v12, 0
 ; RV32-NEXT:    vmerge.vim v12, v12, 1, v0
 ; RV32-NEXT:    addi a0, a1, 1
-; RV32-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
+; RV32-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; RV32-NEXT:    vslideup.vx v12, v8, a1
 ; RV32-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
 ; RV32-NEXT:    vand.vi v8, v12, 1
@@ -196,7 +194,7 @@ define <64 x i1> @insertelt_idx_v64i1(<64 x i1> %x, i1 %elt, i32 zeroext %idx) n
 ; RV64-NEXT:    vmerge.vim v12, v12, 1, v0
 ; RV64-NEXT:    sext.w a0, a1
 ; RV64-NEXT:    addi a1, a0, 1
-; RV64-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
+; RV64-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; RV64-NEXT:    vslideup.vx v12, v8, a0
 ; RV64-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
 ; RV64-NEXT:    vand.vi v8, v12, 1

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
index 6bee4f83a47d2..abfddc19adb06 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
@@ -14,7 +14,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_v2i32_0(<vscale x 8 x i32> %vec, <2 x
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle32.v v12, (a0)
-; CHECK-NEXT:    vsetivli zero, 2, e32, m4, tu, mu
+; CHECK-NEXT:    vsetivli zero, 2, e32, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 0
 ; CHECK-NEXT:    ret
   %sv = load <2 x i32>, <2 x i32>* %svp
@@ -27,7 +27,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_v2i32_2(<vscale x 8 x i32> %vec, <2 x
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle32.v v12, (a0)
-; CHECK-NEXT:    vsetivli zero, 4, e32, m4, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 2
 ; CHECK-NEXT:    ret
   %sv = load <2 x i32>, <2 x i32>* %svp
@@ -40,7 +40,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_v2i32_6(<vscale x 8 x i32> %vec, <2 x
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle32.v v12, (a0)
-; CHECK-NEXT:    vsetivli zero, 8, e32, m4, tu, mu
+; CHECK-NEXT:    vsetivli zero, 8, e32, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 6
 ; CHECK-NEXT:    ret
   %sv = load <2 x i32>, <2 x i32>* %svp
@@ -53,7 +53,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_v8i32_0(<vscale x 8 x i32> %vec, <8 x
 ; LMULMAX2:       # %bb.0:
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX2-NEXT:    vle32.v v12, (a0)
-; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m4, tu, mu
+; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m4, ta, mu
 ; LMULMAX2-NEXT:    vslideup.vi v8, v12, 0
 ; LMULMAX2-NEXT:    ret
 ;
@@ -63,9 +63,9 @@ define <vscale x 8 x i32> @insert_nxv8i32_v8i32_0(<vscale x 8 x i32> %vec, <8 x
 ; LMULMAX1-NEXT:    vle32.v v12, (a0)
 ; LMULMAX1-NEXT:    addi a0, a0, 16
 ; LMULMAX1-NEXT:    vle32.v v16, (a0)
-; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m4, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m4, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v12, 0
-; LMULMAX1-NEXT:    vsetivli zero, 8, e32, m4, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 8, e32, m4, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v16, 4
 ; LMULMAX1-NEXT:    ret
   %sv = load <8 x i32>, <8 x i32>* %svp
@@ -78,7 +78,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_v8i32_8(<vscale x 8 x i32> %vec, <8 x
 ; LMULMAX2:       # %bb.0:
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX2-NEXT:    vle32.v v12, (a0)
-; LMULMAX2-NEXT:    vsetivli zero, 16, e32, m4, tu, mu
+; LMULMAX2-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; LMULMAX2-NEXT:    vslideup.vi v8, v12, 8
 ; LMULMAX2-NEXT:    ret
 ;
@@ -88,9 +88,9 @@ define <vscale x 8 x i32> @insert_nxv8i32_v8i32_8(<vscale x 8 x i32> %vec, <8 x
 ; LMULMAX1-NEXT:    vle32.v v12, (a0)
 ; LMULMAX1-NEXT:    addi a0, a0, 16
 ; LMULMAX1-NEXT:    vle32.v v16, (a0)
-; LMULMAX1-NEXT:    vsetivli zero, 12, e32, m4, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 12, e32, m4, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v12, 8
-; LMULMAX1-NEXT:    vsetivli zero, 16, e32, m4, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v16, 12
 ; LMULMAX1-NEXT:    ret
   %sv = load <8 x i32>, <8 x i32>* %svp
@@ -116,7 +116,7 @@ define void @insert_v4i32_v2i32_0(<4 x i32>* %vp, <2 x i32>* %svp) {
 ; CHECK-NEXT:    vle32.v v8, (a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v9, (a0)
-; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, mu
+; CHECK-NEXT:    vsetivli zero, 2, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vse32.v v9, (a0)
@@ -135,7 +135,6 @@ define void @insert_v4i32_v2i32_2(<4 x i32>* %vp, <2 x i32>* %svp) {
 ; CHECK-NEXT:    vle32.v v8, (a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 2
 ; CHECK-NEXT:    vse32.v v9, (a0)
 ; CHECK-NEXT:    ret
@@ -167,7 +166,7 @@ define void @insert_v8i32_v2i32_0(<8 x i32>* %vp, <2 x i32>* %svp) {
 ; LMULMAX2-NEXT:    vle32.v v8, (a1)
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX2-NEXT:    vle32.v v10, (a0)
-; LMULMAX2-NEXT:    vsetivli zero, 2, e32, m2, tu, mu
+; LMULMAX2-NEXT:    vsetivli zero, 2, e32, m2, ta, mu
 ; LMULMAX2-NEXT:    vslideup.vi v10, v8, 0
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX2-NEXT:    vse32.v v10, (a0)
@@ -179,7 +178,7 @@ define void @insert_v8i32_v2i32_0(<8 x i32>* %vp, <2 x i32>* %svp) {
 ; LMULMAX1-NEXT:    vle32.v v8, (a1)
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-NEXT:    vle32.v v9, (a0)
-; LMULMAX1-NEXT:    vsetivli zero, 2, e32, m1, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 2, e32, m1, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v9, v8, 0
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-NEXT:    vse32.v v9, (a0)
@@ -198,7 +197,7 @@ define void @insert_v8i32_v2i32_2(<8 x i32>* %vp, <2 x i32>* %svp) {
 ; LMULMAX2-NEXT:    vle32.v v8, (a1)
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX2-NEXT:    vle32.v v10, (a0)
-; LMULMAX2-NEXT:    vsetivli zero, 4, e32, m2, tu, mu
+; LMULMAX2-NEXT:    vsetivli zero, 4, e32, m2, ta, mu
 ; LMULMAX2-NEXT:    vslideup.vi v10, v8, 2
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX2-NEXT:    vse32.v v10, (a0)
@@ -210,7 +209,6 @@ define void @insert_v8i32_v2i32_2(<8 x i32>* %vp, <2 x i32>* %svp) {
 ; LMULMAX1-NEXT:    vle32.v v8, (a1)
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-NEXT:    vle32.v v9, (a0)
-; LMULMAX1-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v9, v8, 2
 ; LMULMAX1-NEXT:    vse32.v v9, (a0)
 ; LMULMAX1-NEXT:    ret
@@ -228,7 +226,6 @@ define void @insert_v8i32_v2i32_6(<8 x i32>* %vp, <2 x i32>* %svp) {
 ; LMULMAX2-NEXT:    vle32.v v8, (a1)
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX2-NEXT:    vle32.v v10, (a0)
-; LMULMAX2-NEXT:    vsetvli zero, zero, e32, m2, tu, mu
 ; LMULMAX2-NEXT:    vslideup.vi v10, v8, 6
 ; LMULMAX2-NEXT:    vse32.v v10, (a0)
 ; LMULMAX2-NEXT:    ret
@@ -240,7 +237,6 @@ define void @insert_v8i32_v2i32_6(<8 x i32>* %vp, <2 x i32>* %svp) {
 ; LMULMAX1-NEXT:    addi a0, a0, 16
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-NEXT:    vle32.v v9, (a0)
-; LMULMAX1-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v9, v8, 2
 ; LMULMAX1-NEXT:    vse32.v v9, (a0)
 ; LMULMAX1-NEXT:    ret
@@ -283,7 +279,7 @@ define void @insert_v4i16_v2i16_0(<4 x i16>* %vp, <2 x i16>* %svp) {
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle16.v v9, (a1)
-; CHECK-NEXT:    vsetivli zero, 2, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 0
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
@@ -302,7 +298,7 @@ define void @insert_v4i16_v2i16_2(<4 x i16>* %vp, <2 x i16>* %svp) {
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle16.v v9, (a1)
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 2
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
@@ -321,7 +317,7 @@ define void @insert_v32i1_v8i1_0(<32 x i1>* %vp, <8 x i1>* %svp) {
 ; LMULMAX2-NEXT:    vlm.v v8, (a0)
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX2-NEXT:    vlm.v v9, (a1)
-; LMULMAX2-NEXT:    vsetivli zero, 1, e8, mf4, tu, mu
+; LMULMAX2-NEXT:    vsetivli zero, 1, e8, mf4, ta, mu
 ; LMULMAX2-NEXT:    vslideup.vi v8, v9, 0
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vsm.v v8, (a0)
@@ -333,7 +329,7 @@ define void @insert_v32i1_v8i1_0(<32 x i1>* %vp, <8 x i1>* %svp) {
 ; LMULMAX1-NEXT:    vlm.v v8, (a0)
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vlm.v v9, (a1)
-; LMULMAX1-NEXT:    vsetivli zero, 1, e8, mf8, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 0
 ; LMULMAX1-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; LMULMAX1-NEXT:    vsm.v v8, (a0)
@@ -353,7 +349,7 @@ define void @insert_v32i1_v8i1_16(<32 x i1>* %vp, <8 x i1>* %svp) {
 ; LMULMAX2-NEXT:    vlm.v v8, (a0)
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX2-NEXT:    vlm.v v9, (a1)
-; LMULMAX2-NEXT:    vsetivli zero, 3, e8, mf4, tu, mu
+; LMULMAX2-NEXT:    vsetivli zero, 3, e8, mf4, ta, mu
 ; LMULMAX2-NEXT:    vslideup.vi v8, v9, 2
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vsm.v v8, (a0)
@@ -366,7 +362,7 @@ define void @insert_v32i1_v8i1_16(<32 x i1>* %vp, <8 x i1>* %svp) {
 ; LMULMAX1-NEXT:    vlm.v v8, (a0)
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vlm.v v9, (a1)
-; LMULMAX1-NEXT:    vsetivli zero, 1, e8, mf8, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 0
 ; LMULMAX1-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; LMULMAX1-NEXT:    vsm.v v8, (a0)
@@ -392,7 +388,7 @@ define void @insert_v8i1_v4i1_0(<8 x i1>* %vp, <4 x i1>* %svp) {
 ; CHECK-NEXT:    vmv.v.i v10, 0
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vmerge.vim v8, v10, 1, v0
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -419,9 +415,8 @@ define void @insert_v8i1_v4i1_4(<8 x i1>* %vp, <4 x i1>* %svp) {
 ; CHECK-NEXT:    vmv.v.i v10, 0
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vmerge.vim v8, v10, 1, v0
-; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 4
-; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
 ; CHECK-NEXT:    vsm.v v8, (a0)
 ; CHECK-NEXT:    ret
@@ -437,7 +432,7 @@ define <vscale x 2 x i16> @insert_nxv2i16_v2i16_0(<vscale x 2 x i16> %v, <2 x i1
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle16.v v9, (a0)
-; CHECK-NEXT:    vsetivli zero, 2, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 0
 ; CHECK-NEXT:    ret
   %sv = load <2 x i16>, <2 x i16>* %svp
@@ -450,7 +445,7 @@ define <vscale x 2 x i16> @insert_nxv2i16_v2i16_2(<vscale x 2 x i16> %v, <2 x i1
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle16.v v9, (a0)
-; CHECK-NEXT:    vsetivli zero, 6, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 6, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 4
 ; CHECK-NEXT:    ret
   %sv = load <2 x i16>, <2 x i16>* %svp
@@ -470,7 +465,6 @@ define <vscale x 2 x i1> @insert_nxv2i1_v4i1_0(<vscale x 2 x i1> %v, <4 x i1>* %
 ; CHECK-NEXT:    vmv.v.i v10, 0
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vmerge.vim v8, v10, 1, v0
-; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, tu, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
@@ -485,7 +479,7 @@ define <vscale x 8 x i1> @insert_nxv8i1_v4i1_0(<vscale x 8 x i1> %v, <8 x i1>* %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vlm.v v8, (a0)
-; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, tu, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v0, v8, 0
 ; CHECK-NEXT:    ret
   %sv = load <8 x i1>, <8 x i1>* %svp
@@ -498,7 +492,7 @@ define <vscale x 8 x i1> @insert_nxv8i1_v8i1_16(<vscale x 8 x i1> %v, <8 x i1>*
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vlm.v v8, (a0)
-; CHECK-NEXT:    vsetivli zero, 3, e8, mf8, tu, mu
+; CHECK-NEXT:    vsetivli zero, 3, e8, mf8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v0, v8, 2
 ; CHECK-NEXT:    ret
   %sv = load <8 x i1>, <8 x i1>* %svp
@@ -514,7 +508,7 @@ define void @insert_v2i64_nxv16i64(<2 x i64>* %psv0, <2 x i64>* %psv1, <vscale x
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vle64.v v16, (a1)
-; CHECK-NEXT:    vsetivli zero, 6, e64, m8, tu, mu
+; CHECK-NEXT:    vsetivli zero, 6, e64, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v16, 4
 ; CHECK-NEXT:    vs8r.v v8, (a2)
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
index acd0361ec0828..57506bed48b41 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
@@ -14,7 +14,7 @@ define void @insertelt_v4i64(<4 x i64>* %x, i64 %y) {
 ; RV32-NEXT:    vmv.v.i v10, 0
 ; RV32-NEXT:    vslide1up.vx v12, v10, a2
 ; RV32-NEXT:    vslide1up.vx v10, v12, a1
-; RV32-NEXT:    vsetivli zero, 4, e64, m2, tu, mu
+; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; RV32-NEXT:    vslideup.vi v8, v10, 3
 ; RV32-NEXT:    vse64.v v8, (a0)
 ; RV32-NEXT:    ret
@@ -24,7 +24,6 @@ define void @insertelt_v4i64(<4 x i64>* %x, i64 %y) {
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; RV64-NEXT:    vle64.v v8, (a0)
 ; RV64-NEXT:    vmv.s.x v10, a1
-; RV64-NEXT:    vsetvli zero, zero, e64, m2, tu, mu
 ; RV64-NEXT:    vslideup.vi v8, v10, 3
 ; RV64-NEXT:    vse64.v v8, (a0)
 ; RV64-NEXT:    ret
@@ -49,13 +48,13 @@ define void @insertelt_v3i64(<3 x i64>* %x, i64 %y) {
 ; RV32-NEXT:    vlse32.v v10, (a4), zero
 ; RV32-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
 ; RV32-NEXT:    vmv.s.x v10, a3
-; RV32-NEXT:    vsetvli zero, zero, e64, m2, tu, mu
+; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV32-NEXT:    vslideup.vi v8, v10, 2
 ; RV32-NEXT:    vsetivli zero, 2, e32, m2, ta, mu
 ; RV32-NEXT:    vmv.v.i v10, 0
 ; RV32-NEXT:    vslide1up.vx v12, v10, a2
 ; RV32-NEXT:    vslide1up.vx v10, v12, a1
-; RV32-NEXT:    vsetivli zero, 3, e64, m2, tu, mu
+; RV32-NEXT:    vsetivli zero, 3, e64, m2, ta, mu
 ; RV32-NEXT:    vslideup.vi v8, v10, 2
 ; RV32-NEXT:    sw a1, 16(a0)
 ; RV32-NEXT:    sw a2, 20(a0)
@@ -79,7 +78,7 @@ define void @insertelt_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmv.s.x v9, a1
-; CHECK-NEXT:    vsetivli zero, 15, e8, m1, tu, mu
+; CHECK-NEXT:    vsetivli zero, 15, e8, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 14
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
@@ -98,7 +97,7 @@ define void @insertelt_v32i16(<32 x i16>* %x, i16 %y, i32 %idx) {
 ; RV32-NEXT:    vle16.v v8, (a0)
 ; RV32-NEXT:    vmv.s.x v12, a1
 ; RV32-NEXT:    addi a1, a2, 1
-; RV32-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
+; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; RV32-NEXT:    vslideup.vx v8, v12, a2
 ; RV32-NEXT:    vsetvli zero, a3, e16, m4, ta, mu
 ; RV32-NEXT:    vse16.v v8, (a0)
@@ -112,7 +111,7 @@ define void @insertelt_v32i16(<32 x i16>* %x, i16 %y, i32 %idx) {
 ; RV64-NEXT:    vmv.s.x v12, a1
 ; RV64-NEXT:    sext.w a1, a2
 ; RV64-NEXT:    addi a2, a1, 1
-; RV64-NEXT:    vsetvli zero, a2, e16, m4, tu, mu
+; RV64-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
 ; RV64-NEXT:    vslideup.vx v8, v12, a1
 ; RV64-NEXT:    vsetvli zero, a3, e16, m4, ta, mu
 ; RV64-NEXT:    vse16.v v8, (a0)
@@ -130,7 +129,7 @@ define void @insertelt_v8f32(<8 x float>* %x, float %y, i32 %idx) {
 ; RV32-NEXT:    vle32.v v8, (a0)
 ; RV32-NEXT:    vfmv.s.f v10, fa0
 ; RV32-NEXT:    addi a2, a1, 1
-; RV32-NEXT:    vsetvli zero, a2, e32, m2, tu, mu
+; RV32-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
 ; RV32-NEXT:    vslideup.vx v8, v10, a1
 ; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; RV32-NEXT:    vse32.v v8, (a0)
@@ -143,7 +142,7 @@ define void @insertelt_v8f32(<8 x float>* %x, float %y, i32 %idx) {
 ; RV64-NEXT:    vfmv.s.f v10, fa0
 ; RV64-NEXT:    sext.w a1, a1
 ; RV64-NEXT:    addi a2, a1, 1
-; RV64-NEXT:    vsetvli zero, a2, e32, m2, tu, mu
+; RV64-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
 ; RV64-NEXT:    vslideup.vx v8, v10, a1
 ; RV64-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; RV64-NEXT:    vse32.v v8, (a0)
@@ -178,7 +177,7 @@ define void @insertelt_v8i64(<8 x i64>* %x, i32 %idx) {
 ; RV32-NEXT:    li a2, -1
 ; RV32-NEXT:    vmv.s.x v12, a2
 ; RV32-NEXT:    addi a2, a1, 1
-; RV32-NEXT:    vsetvli zero, a2, e64, m4, tu, mu
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
 ; RV32-NEXT:    vslideup.vx v8, v12, a1
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vse64.v v8, (a0)
@@ -192,7 +191,7 @@ define void @insertelt_v8i64(<8 x i64>* %x, i32 %idx) {
 ; RV64-NEXT:    vmv.s.x v12, a2
 ; RV64-NEXT:    sext.w a1, a1
 ; RV64-NEXT:    addi a2, a1, 1
-; RV64-NEXT:    vsetvli zero, a2, e64, m4, tu, mu
+; RV64-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
 ; RV64-NEXT:    vslideup.vx v8, v12, a1
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV64-NEXT:    vse64.v v8, (a0)
@@ -227,7 +226,7 @@ define void @insertelt_c6_v8i64(<8 x i64>* %x, i32 %idx) {
 ; RV32-NEXT:    li a2, 6
 ; RV32-NEXT:    vmv.s.x v12, a2
 ; RV32-NEXT:    addi a2, a1, 1
-; RV32-NEXT:    vsetvli zero, a2, e64, m4, tu, mu
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
 ; RV32-NEXT:    vslideup.vx v8, v12, a1
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vse64.v v8, (a0)
@@ -241,7 +240,7 @@ define void @insertelt_c6_v8i64(<8 x i64>* %x, i32 %idx) {
 ; RV64-NEXT:    vmv.s.x v12, a2
 ; RV64-NEXT:    sext.w a1, a1
 ; RV64-NEXT:    addi a2, a1, 1
-; RV64-NEXT:    vsetvli zero, a2, e64, m4, tu, mu
+; RV64-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
 ; RV64-NEXT:    vslideup.vx v8, v12, a1
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV64-NEXT:    vse64.v v8, (a0)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
index 8c9d056b0bda0..651fcfa33b67c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
@@ -195,7 +195,7 @@ define <4 x i64> @buildvec_vid_step1_add0_v4i64() {
 ; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; RV32-NEXT:    vmv.s.x v9, a0
 ; RV32-NEXT:    vmv.v.i v8, 0
-; RV32-NEXT:    vsetivli zero, 3, e32, m1, tu, mu
+; RV32-NEXT:    vsetivli zero, 3, e32, m1, ta, mu
 ; RV32-NEXT:    vslideup.vi v8, v9, 2
 ; RV32-NEXT:    lui a0, %hi(.LCPI12_0)
 ; RV32-NEXT:    addi a0, a0, %lo(.LCPI12_0)
@@ -219,7 +219,7 @@ define <4 x i64> @buildvec_vid_step2_add0_v4i64() {
 ; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; RV32-NEXT:    vmv.s.x v9, a0
 ; RV32-NEXT:    vmv.v.i v8, 0
-; RV32-NEXT:    vsetivli zero, 3, e32, m1, tu, mu
+; RV32-NEXT:    vsetivli zero, 3, e32, m1, ta, mu
 ; RV32-NEXT:    vslideup.vi v8, v9, 2
 ; RV32-NEXT:    lui a0, %hi(.LCPI13_0)
 ; RV32-NEXT:    addi a0, a0, %lo(.LCPI13_0)
@@ -312,7 +312,7 @@ define void @buildvec_dominant0_v8i16(<8 x i16>* %x) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.s.x v8, zero
 ; CHECK-NEXT:    vmv.v.i v9, 8
-; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 3
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vse16.v v9, (a0)
@@ -549,14 +549,13 @@ define void @buildvec_vid_step1o2_v4i32(<4 x i32>* %z0, <4 x i32>* %z1, <4 x i32
 ; RV32-NEXT:    vse32.v v8, (a3)
 ; RV32-NEXT:    vse32.v v8, (a4)
 ; RV32-NEXT:    vmv.s.x v8, zero
-; RV32-NEXT:    vsetivli zero, 2, e32, m1, tu, mu
+; RV32-NEXT:    vsetivli zero, 2, e32, m1, ta, mu
 ; RV32-NEXT:    vslideup.vi v9, v8, 1
 ; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; RV32-NEXT:    vse32.v v9, (a5)
 ; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    vmv.s.x v8, a0
 ; RV32-NEXT:    vmv.v.i v9, 0
-; RV32-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
 ; RV32-NEXT:    vslideup.vi v9, v8, 3
 ; RV32-NEXT:    vse32.v v9, (a6)
 ; RV32-NEXT:    ret
@@ -573,14 +572,13 @@ define void @buildvec_vid_step1o2_v4i32(<4 x i32>* %z0, <4 x i32>* %z1, <4 x i32
 ; RV64-NEXT:    vse32.v v8, (a3)
 ; RV64-NEXT:    vse32.v v8, (a4)
 ; RV64-NEXT:    vmv.s.x v8, zero
-; RV64-NEXT:    vsetivli zero, 2, e32, m1, tu, mu
+; RV64-NEXT:    vsetivli zero, 2, e32, m1, ta, mu
 ; RV64-NEXT:    vslideup.vi v9, v8, 1
 ; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; RV64-NEXT:    vse32.v v9, (a5)
 ; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    vmv.s.x v8, a0
 ; RV64-NEXT:    vmv.v.i v9, 0
-; RV64-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
 ; RV64-NEXT:    vslideup.vi v9, v8, 3
 ; RV64-NEXT:    vse32.v v9, (a6)
 ; RV64-NEXT:    ret
@@ -612,13 +610,12 @@ define void @buildvec_vid_step1o2_add3_v4i16(<4 x i16>* %z0, <4 x i16>* %z1, <4
 ; CHECK-NEXT:    li a0, 3
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vmv.v.i v10, 4
-; CHECK-NEXT:    vsetivli zero, 2, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v10, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vse16.v v10, (a5)
 ; CHECK-NEXT:    li a0, 4
 ; CHECK-NEXT:    vmv.s.x v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, tu, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 3
 ; CHECK-NEXT:    vse16.v v9, (a6)
 ; CHECK-NEXT:    ret
@@ -755,7 +752,7 @@ define <16 x i8> @buildvec_not_vid_v16i8() {
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vsetivli zero, 7, e8, m1, tu, mu
+; CHECK-NEXT:    vsetivli zero, 7, e8, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 6
 ; CHECK-NEXT:    ret
   ret <16 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 3, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 0, i8 0>

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
index ca6f78dce853d..333da22aa3a36 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
@@ -217,7 +217,7 @@ define void @trunc_v8i8_v8i32(<8 x i32>* %x, <8 x i8>* %z) {
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, tu, mu
+; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 4
 ; LMULMAX1-NEXT:    vse8.v v8, (a1)
 ; LMULMAX1-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
index 061d48b7add0b..0af0a192bf7a0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
@@ -192,41 +192,37 @@ define <8 x i64> @vrgather_shuffle_vv_v8i64(<8 x i64> %x, <8 x i64> %y) {
 ; RV32-NEXT:    li a0, 5
 ; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; RV32-NEXT:    vmv.s.x v16, a0
-; RV32-NEXT:    vmv.v.i v20, 2
-; RV32-NEXT:    vsetvli zero, zero, e16, m1, tu, mu
-; RV32-NEXT:    vslideup.vi v20, v16, 7
 ; RV32-NEXT:    lui a0, %hi(.LCPI11_0)
 ; RV32-NEXT:    addi a0, a0, %lo(.LCPI11_0)
-; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV32-NEXT:    vle16.v v21, (a0)
+; RV32-NEXT:    vle16.v v20, (a0)
+; RV32-NEXT:    vmv.v.i v21, 2
+; RV32-NEXT:    vslideup.vi v21, v16, 7
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV32-NEXT:    vrgatherei16.vv v16, v8, v21
+; RV32-NEXT:    vrgatherei16.vv v16, v8, v20
 ; RV32-NEXT:    li a0, 164
 ; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; RV32-NEXT:    vmv.s.x v0, a0
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
-; RV32-NEXT:    vrgatherei16.vv v16, v12, v20, v0.t
+; RV32-NEXT:    vrgatherei16.vv v16, v12, v21, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrgather_shuffle_vv_v8i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    li a0, 5
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
-; RV64-NEXT:    vmv.s.x v16, a0
-; RV64-NEXT:    vmv.v.i v20, 2
-; RV64-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
-; RV64-NEXT:    vslideup.vi v20, v16, 7
 ; RV64-NEXT:    lui a0, %hi(.LCPI11_0)
 ; RV64-NEXT:    addi a0, a0, %lo(.LCPI11_0)
-; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV64-NEXT:    vle64.v v24, (a0)
-; RV64-NEXT:    vrgather.vv v16, v8, v24
+; RV64-NEXT:    vle64.v v20, (a0)
+; RV64-NEXT:    li a0, 5
+; RV64-NEXT:    vmv.s.x v16, a0
+; RV64-NEXT:    vmv.v.i v24, 2
+; RV64-NEXT:    vslideup.vi v24, v16, 7
+; RV64-NEXT:    vrgather.vv v16, v8, v20
 ; RV64-NEXT:    li a0, 164
 ; RV64-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; RV64-NEXT:    vmv.s.x v0, a0
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
-; RV64-NEXT:    vrgather.vv v16, v12, v20, v0.t
+; RV64-NEXT:    vrgather.vv v16, v12, v24, v0.t
 ; RV64-NEXT:    vmv.v.v v8, v16
 ; RV64-NEXT:    ret
   %s = shufflevector <8 x i64> %x, <8 x i64> %y, <8 x i32> <i32 1, i32 2, i32 10, i32 5, i32 1, i32 10, i32 3, i32 13>
@@ -362,7 +358,7 @@ define <8 x i8> @splat_ve4_ins_i1ve3(<8 x i8> %v) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    vmv.v.i v10, 4
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v10, v9, 1
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vrgather.vv v9, v8, v10
@@ -463,10 +459,9 @@ define <8 x i8> @splat_ve2_we0_ins_i2we4(<8 x i8> %v, <8 x i8> %w) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v10, a0
 ; CHECK-NEXT:    vmv.v.i v11, 0
-; CHECK-NEXT:    vsetivli zero, 3, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 3, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v11, v10, 2
 ; CHECK-NEXT:    li a0, 70
-; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vrgather.vi v10, v8, 2
@@ -484,7 +479,7 @@ define <8 x i8> @splat_ve2_we0_ins_i2ve4_i5we6(<8 x i8> %v, <8 x i8> %w) {
 ; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV32-NEXT:    vmv.s.x v10, a0
 ; RV32-NEXT:    vmv.v.i v11, 0
-; RV32-NEXT:    vsetivli zero, 6, e8, mf2, tu, mu
+; RV32-NEXT:    vsetivli zero, 6, e8, mf2, ta, mu
 ; RV32-NEXT:    vslideup.vi v11, v10, 5
 ; RV32-NEXT:    lui a0, 8256
 ; RV32-NEXT:    addi a0, a0, 2
@@ -504,7 +499,7 @@ define <8 x i8> @splat_ve2_we0_ins_i2ve4_i5we6(<8 x i8> %v, <8 x i8> %w) {
 ; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV64-NEXT:    vmv.s.x v10, a0
 ; RV64-NEXT:    vmv.v.i v11, 0
-; RV64-NEXT:    vsetivli zero, 6, e8, mf2, tu, mu
+; RV64-NEXT:    vsetivli zero, 6, e8, mf2, ta, mu
 ; RV64-NEXT:    vslideup.vi v11, v10, 5
 ; RV64-NEXT:    lui a0, 8256
 ; RV64-NEXT:    addiw a0, a0, 2
@@ -526,7 +521,7 @@ define <8 x i8> @widen_splat_ve3(<4 x i8> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 3
@@ -582,9 +577,9 @@ define <8 x i16> @splice_unary(<8 x i16> %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 6, e16, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v9, v8, 2
-; CHECK-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 6
-; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
   %s = shufflevector <8 x i16> %x, <8 x i16> poison, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1>
   ret <8 x i16> %s
@@ -595,9 +590,9 @@ define <8 x i32> @splice_unary2(<8 x i32> %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 3, e32, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v10, v8, 5
-; CHECK-NEXT:    vsetivli zero, 8, e32, m2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v10, v8, 3
-; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
   %s = shufflevector <8 x i32> %x, <8 x i32> poison, <8 x i32> <i32 undef, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4>
   ret <8 x i32> %s
@@ -608,7 +603,7 @@ define <8 x i16> @splice_binary(<8 x i16> %x, <8 x i16> %y) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 6, e16, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 6
 ; CHECK-NEXT:    ret
   %s = shufflevector <8 x i16> %x, <8 x i16> %y, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 9>
@@ -620,7 +615,7 @@ define <8 x i32> @splice_binary2(<8 x i32> %x, <8 x i32> %y) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 3, e32, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 5
-; CHECK-NEXT:    vsetivli zero, 8, e32, m2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-NEXT:    ret
   %s = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 undef, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12>

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
index ab10e53485a3d..035d068e0b310 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
@@ -949,7 +949,7 @@ define void @mulhu_v8i16(<8 x i16>* %x) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v10, 3
 ; CHECK-NEXT:    vmerge.vim v10, v10, 2, v0
-; CHECK-NEXT:    vsetivli zero, 7, e16, m1, tu, mu
+; CHECK-NEXT:    vsetivli zero, 7, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v10, v9, 6
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v11, 0
@@ -957,7 +957,7 @@ define void @mulhu_v8i16(<8 x i16>* %x) {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, tu, mu
 ; CHECK-NEXT:    vmv.v.i v12, 0
 ; CHECK-NEXT:    vmv.s.x v12, a1
-; CHECK-NEXT:    vsetivli zero, 7, e16, m1, tu, mu
+; CHECK-NEXT:    vsetivli zero, 7, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v11, v9, 6
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    lui a1, %hi(.LCPI53_0)
@@ -985,7 +985,7 @@ define void @mulhu_v4i32(<4 x i32>* %x) {
 ; CHECK-NEXT:    lui a1, 524288
 ; CHECK-NEXT:    vmv.s.x v9, a1
 ; CHECK-NEXT:    vmv.v.i v10, 0
-; CHECK-NEXT:    vsetivli zero, 3, e32, m1, tu, mu
+; CHECK-NEXT:    vsetivli zero, 3, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v10, v9, 2
 ; CHECK-NEXT:    lui a1, %hi(.LCPI54_0)
 ; CHECK-NEXT:    addi a1, a1, %lo(.LCPI54_0)
@@ -998,9 +998,7 @@ define void @mulhu_v4i32(<4 x i32>* %x) {
 ; CHECK-NEXT:    li a1, 1
 ; CHECK-NEXT:    vmv.s.x v9, a1
 ; CHECK-NEXT:    vmv.v.i v10, 2
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
 ; CHECK-NEXT:    vslideup.vi v10, v9, 3
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v10
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
@@ -1215,7 +1213,7 @@ define void @mulhs_v2i64(<2 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; RV32-NEXT:    vmv.s.x v8, a1
 ; RV32-NEXT:    vmv.v.i v9, 0
-; RV32-NEXT:    vsetivli zero, 3, e32, m1, tu, mu
+; RV32-NEXT:    vsetivli zero, 3, e32, m1, ta, mu
 ; RV32-NEXT:    vslideup.vi v9, v8, 2
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV32-NEXT:    vsra.vv v8, v10, v9
@@ -4209,7 +4207,7 @@ define void @mulhu_v8i32(<8 x i32>* %x) {
 ; LMULMAX1-RV32-NEXT:    lui a2, 524288
 ; LMULMAX1-RV32-NEXT:    vmv.s.x v10, a2
 ; LMULMAX1-RV32-NEXT:    vmv.v.i v11, 0
-; LMULMAX1-RV32-NEXT:    vsetivli zero, 3, e32, m1, tu, mu
+; LMULMAX1-RV32-NEXT:    vsetivli zero, 3, e32, m1, ta, mu
 ; LMULMAX1-RV32-NEXT:    vslideup.vi v11, v10, 2
 ; LMULMAX1-RV32-NEXT:    lui a2, %hi(.LCPI131_0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a2, %lo(.LCPI131_0)
@@ -4222,9 +4220,7 @@ define void @mulhu_v8i32(<8 x i32>* %x) {
 ; LMULMAX1-RV32-NEXT:    li a2, 1
 ; LMULMAX1-RV32-NEXT:    vmv.s.x v12, a2
 ; LMULMAX1-RV32-NEXT:    vmv.v.i v13, 2
-; LMULMAX1-RV32-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
 ; LMULMAX1-RV32-NEXT:    vslideup.vi v13, v12, 3
-; LMULMAX1-RV32-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; LMULMAX1-RV32-NEXT:    vsrl.vv v9, v9, v13
 ; LMULMAX1-RV32-NEXT:    vmulhu.vv v10, v8, v10
 ; LMULMAX1-RV32-NEXT:    vsub.vv v8, v8, v10
@@ -4271,7 +4267,7 @@ define void @mulhu_v4i64(<4 x i64>* %x) {
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmv.s.x v12, a1
 ; LMULMAX2-RV32-NEXT:    vmv.v.i v14, 0
-; LMULMAX2-RV32-NEXT:    vsetivli zero, 6, e32, m2, tu, mu
+; LMULMAX2-RV32-NEXT:    vsetivli zero, 6, e32, m2, ta, mu
 ; LMULMAX2-RV32-NEXT:    vslideup.vi v14, v12, 5
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmulhu.vv v8, v8, v14
@@ -4293,7 +4289,7 @@ define void @mulhu_v4i64(<4 x i64>* %x) {
 ; LMULMAX2-RV64-NEXT:    slli a1, a1, 63
 ; LMULMAX2-RV64-NEXT:    vmv.s.x v10, a1
 ; LMULMAX2-RV64-NEXT:    vmv.v.i v12, 0
-; LMULMAX2-RV64-NEXT:    vsetivli zero, 3, e64, m2, tu, mu
+; LMULMAX2-RV64-NEXT:    vsetivli zero, 3, e64, m2, ta, mu
 ; LMULMAX2-RV64-NEXT:    vslideup.vi v12, v10, 2
 ; LMULMAX2-RV64-NEXT:    lui a1, %hi(.LCPI132_0)
 ; LMULMAX2-RV64-NEXT:    addi a1, a1, %lo(.LCPI132_0)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
index 16fc8b8f3eff3..6b1b9913e5dd0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
@@ -667,7 +667,6 @@ define <64 x i1> @buildvec_mask_v64i1() {
 ; RV32-LMULMAX4-NEXT:    lui a0, 748384
 ; RV32-LMULMAX4-NEXT:    addi a0, a0, 1776
 ; RV32-LMULMAX4-NEXT:    vmv.s.x v0, a0
-; RV32-LMULMAX4-NEXT:    vsetvli zero, zero, e32, mf2, tu, mu
 ; RV32-LMULMAX4-NEXT:    vslideup.vi v0, v8, 1
 ; RV32-LMULMAX4-NEXT:    ret
 ;
@@ -688,7 +687,6 @@ define <64 x i1> @buildvec_mask_v64i1() {
 ; RV32-LMULMAX8-NEXT:    lui a0, 748384
 ; RV32-LMULMAX8-NEXT:    addi a0, a0, 1776
 ; RV32-LMULMAX8-NEXT:    vmv.s.x v0, a0
-; RV32-LMULMAX8-NEXT:    vsetvli zero, zero, e32, mf2, tu, mu
 ; RV32-LMULMAX8-NEXT:    vslideup.vi v0, v8, 1
 ; RV32-LMULMAX8-NEXT:    ret
 ;
@@ -790,16 +788,13 @@ define <128 x i1> @buildvec_mask_v128i1() {
 ; RV32-LMULMAX4-NEXT:    lui a0, 748384
 ; RV32-LMULMAX4-NEXT:    addi a0, a0, 1776
 ; RV32-LMULMAX4-NEXT:    vmv.s.x v0, a0
-; RV32-LMULMAX4-NEXT:    vsetvli zero, zero, e32, mf2, tu, mu
 ; RV32-LMULMAX4-NEXT:    vslideup.vi v0, v8, 1
 ; RV32-LMULMAX4-NEXT:    lui a0, 945060
 ; RV32-LMULMAX4-NEXT:    addi a0, a0, -1793
-; RV32-LMULMAX4-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV32-LMULMAX4-NEXT:    vmv.s.x v9, a0
 ; RV32-LMULMAX4-NEXT:    lui a0, 551776
 ; RV32-LMULMAX4-NEXT:    addi a0, a0, 1776
 ; RV32-LMULMAX4-NEXT:    vmv.s.x v8, a0
-; RV32-LMULMAX4-NEXT:    vsetvli zero, zero, e32, mf2, tu, mu
 ; RV32-LMULMAX4-NEXT:    vslideup.vi v8, v9, 1
 ; RV32-LMULMAX4-NEXT:    ret
 ;
@@ -823,19 +818,17 @@ define <128 x i1> @buildvec_mask_v128i1() {
 ; RV32-LMULMAX8-NEXT:    lui a0, 748384
 ; RV32-LMULMAX8-NEXT:    addi a0, a0, 1776
 ; RV32-LMULMAX8-NEXT:    vmv.s.x v0, a0
-; RV32-LMULMAX8-NEXT:    vsetivli zero, 2, e32, m1, tu, mu
+; RV32-LMULMAX8-NEXT:    vsetivli zero, 2, e32, m1, ta, mu
 ; RV32-LMULMAX8-NEXT:    vslideup.vi v0, v8, 1
 ; RV32-LMULMAX8-NEXT:    lui a0, 551776
 ; RV32-LMULMAX8-NEXT:    addi a0, a0, 1776
-; RV32-LMULMAX8-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; RV32-LMULMAX8-NEXT:    vmv.s.x v8, a0
-; RV32-LMULMAX8-NEXT:    vsetivli zero, 3, e32, m1, tu, mu
+; RV32-LMULMAX8-NEXT:    vsetivli zero, 3, e32, m1, ta, mu
 ; RV32-LMULMAX8-NEXT:    vslideup.vi v0, v8, 2
 ; RV32-LMULMAX8-NEXT:    lui a0, 945060
 ; RV32-LMULMAX8-NEXT:    addi a0, a0, -1793
-; RV32-LMULMAX8-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; RV32-LMULMAX8-NEXT:    vmv.s.x v8, a0
-; RV32-LMULMAX8-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
+; RV32-LMULMAX8-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; RV32-LMULMAX8-NEXT:    vslideup.vi v0, v8, 3
 ; RV32-LMULMAX8-NEXT:    ret
 ;
@@ -848,7 +841,6 @@ define <128 x i1> @buildvec_mask_v128i1() {
 ; RV64-LMULMAX8-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV64-LMULMAX8-NEXT:    vmv.s.x v8, a0
 ; RV64-LMULMAX8-NEXT:    vmv.s.x v0, a1
-; RV64-LMULMAX8-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
 ; RV64-LMULMAX8-NEXT:    vslideup.vi v0, v8, 1
 ; RV64-LMULMAX8-NEXT:    ret
   ret <128 x i1> <i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 0, i1 1, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 1, i1 1, i1 1>

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll
index 924d20e8aff80..0568cbd22766a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll
@@ -13,7 +13,7 @@ define void @load_store_v1i1(<1 x i1>* %x, <1 x i1>* %y) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -33,7 +33,7 @@ define void @load_store_v2i1(<2 x i1>* %x, <2 x i1>* %y) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -53,7 +53,7 @@ define void @load_store_v4i1(<4 x i1>* %x, <4 x i1>* %y) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
index bfe695c553b0e..299e3027accb0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
@@ -13,7 +13,7 @@ define void @splat_ones_v1i1(<1 x i1>* %x) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -32,7 +32,7 @@ define void @splat_zeros_v2i1(<2 x i1>* %x) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -53,7 +53,7 @@ define void @splat_v1i1(<1 x i1>* %x, i1 %y) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -77,7 +77,7 @@ define void @splat_v1i1_icmp(<1 x i1>* %x, i32 signext %y, i32 signext %z) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -99,7 +99,7 @@ define void @splat_ones_v4i1(<4 x i1>* %x) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0
@@ -120,7 +120,7 @@ define void @splat_v4i1(<4 x i1>* %x, i1 %y) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v8, v9, 0

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index 35cb5f7a16485..d6230ad944d36 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -2178,9 +2178,9 @@ define <32 x i8> @mgather_baseidx_v32i8(i8* %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; RV64-NEXT:    vluxei64.v v10, (a0), v16, v0.t
 ; RV64-NEXT:    li a0, 32
-; RV64-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
+; RV64-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; RV64-NEXT:    vslideup.vi v12, v10, 16
-; RV64-NEXT:    vmv2r.v v8, v12
+; RV64-NEXT:    vmv.v.v v8, v12
 ; RV64-NEXT:    ret
   %ptrs = getelementptr inbounds i8, i8* %base, <32 x i8> %idxs
   %v = call <32 x i8> @llvm.masked.gather.v32i8.v32p0i8(<32 x i8*> %ptrs, i32 2, <32 x i1> %m, <32 x i8> %passthru)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
index 9c4729b65e449..7f8d7ef84c45a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
@@ -56,7 +56,7 @@ define <2 x i16> @mgather_v2i16_align1(<2 x i16*> %ptrs, <2 x i1> %m, <2 x i16>
 ; RV32-NEXT:    vmerge.vim v10, v10, 1, v0
 ; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV32-NEXT:    vmv.v.i v11, 0
-; RV32-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; RV32-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; RV32-NEXT:    vslideup.vi v11, v10, 0
 ; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV32-NEXT:    vmsne.vi v10, v11, 0
@@ -87,7 +87,6 @@ define <2 x i16> @mgather_v2i16_align1(<2 x i16*> %ptrs, <2 x i1> %m, <2 x i16>
 ; RV32-NEXT:    or a0, a1, a0
 ; RV32-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; RV32-NEXT:    vmv.s.x v8, a0
-; RV32-NEXT:    vsetvli zero, zero, e16, mf4, tu, mu
 ; RV32-NEXT:    vslideup.vi v9, v8, 1
 ; RV32-NEXT:  .LBB4_4: # %else2
 ; RV32-NEXT:    vmv1r.v v8, v9
@@ -103,7 +102,7 @@ define <2 x i16> @mgather_v2i16_align1(<2 x i16*> %ptrs, <2 x i1> %m, <2 x i16>
 ; RV64-NEXT:    vmerge.vim v10, v10, 1, v0
 ; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV64-NEXT:    vmv.v.i v11, 0
-; RV64-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; RV64-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; RV64-NEXT:    vslideup.vi v11, v10, 0
 ; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV64-NEXT:    vmsne.vi v10, v11, 0
@@ -134,7 +133,6 @@ define <2 x i16> @mgather_v2i16_align1(<2 x i16*> %ptrs, <2 x i1> %m, <2 x i16>
 ; RV64-NEXT:    or a0, a1, a0
 ; RV64-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; RV64-NEXT:    vmv.s.x v8, a0
-; RV64-NEXT:    vsetvli zero, zero, e16, mf4, tu, mu
 ; RV64-NEXT:    vslideup.vi v9, v8, 1
 ; RV64-NEXT:  .LBB4_4: # %else2
 ; RV64-NEXT:    vmv1r.v v8, v9
@@ -156,7 +154,7 @@ define <2 x i64> @mgather_v2i64_align4(<2 x i64*> %ptrs, <2 x i1> %m, <2 x i64>
 ; RV32-NEXT:    vmerge.vim v10, v10, 1, v0
 ; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV32-NEXT:    vmv.v.i v11, 0
-; RV32-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; RV32-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; RV32-NEXT:    vslideup.vi v11, v10, 0
 ; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV32-NEXT:    vmsne.vi v10, v11, 0
@@ -173,7 +171,7 @@ define <2 x i64> @mgather_v2i64_align4(<2 x i64*> %ptrs, <2 x i1> %m, <2 x i64>
 ; RV32-NEXT:    lw a1, 0(a1)
 ; RV32-NEXT:    vslide1up.vx v11, v10, a2
 ; RV32-NEXT:    vslide1up.vx v12, v11, a1
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, tu, mu
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vslideup.vi v9, v12, 0
 ; RV32-NEXT:  .LBB5_2: # %else
 ; RV32-NEXT:    andi a0, a0, 2
@@ -187,7 +185,7 @@ define <2 x i64> @mgather_v2i64_align4(<2 x i64*> %ptrs, <2 x i1> %m, <2 x i64>
 ; RV32-NEXT:    vsetivli zero, 2, e32, m1, ta, mu
 ; RV32-NEXT:    vslide1up.vx v8, v10, a1
 ; RV32-NEXT:    vslide1up.vx v10, v8, a0
-; RV32-NEXT:    vsetivli zero, 2, e64, m1, tu, mu
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV32-NEXT:    vslideup.vi v9, v10, 1
 ; RV32-NEXT:  .LBB5_4: # %else2
 ; RV32-NEXT:    vmv1r.v v8, v9
@@ -203,7 +201,7 @@ define <2 x i64> @mgather_v2i64_align4(<2 x i64*> %ptrs, <2 x i1> %m, <2 x i64>
 ; RV64-NEXT:    vmerge.vim v10, v10, 1, v0
 ; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV64-NEXT:    vmv.v.i v11, 0
-; RV64-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; RV64-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; RV64-NEXT:    vslideup.vi v11, v10, 0
 ; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV64-NEXT:    vmsne.vi v10, v11, 0
@@ -233,7 +231,7 @@ define <2 x i64> @mgather_v2i64_align4(<2 x i64*> %ptrs, <2 x i1> %m, <2 x i64>
 ; RV64-NEXT:    slli a1, a1, 32
 ; RV64-NEXT:    or a0, a1, a0
 ; RV64-NEXT:    vmv.s.x v8, a0
-; RV64-NEXT:    vsetivli zero, 2, e64, m1, tu, mu
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV64-NEXT:    vslideup.vi v9, v8, 1
 ; RV64-NEXT:  .LBB5_4: # %else2
 ; RV64-NEXT:    vmv1r.v v8, v9
@@ -255,7 +253,7 @@ define void @mscatter_v4i16_align1(<4 x i16> %val, <4 x i16*> %ptrs, <4 x i1> %m
 ; RV32-NEXT:    vmerge.vim v10, v10, 1, v0
 ; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV32-NEXT:    vmv.v.i v11, 0
-; RV32-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; RV32-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; RV32-NEXT:    vslideup.vi v11, v10, 0
 ; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV32-NEXT:    vmsne.vi v10, v11, 0
@@ -332,7 +330,7 @@ define void @mscatter_v4i16_align1(<4 x i16> %val, <4 x i16*> %ptrs, <4 x i1> %m
 ; RV64-NEXT:    vmerge.vim v9, v9, 1, v0
 ; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV64-NEXT:    vmv.v.i v12, 0
-; RV64-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; RV64-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; RV64-NEXT:    vslideup.vi v12, v9, 0
 ; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV64-NEXT:    vmsne.vi v9, v12, 0
@@ -415,7 +413,7 @@ define void @mscatter_v2i32_align2(<2 x i32> %val, <2 x i32*> %ptrs, <2 x i1> %m
 ; RV32-NEXT:    vmerge.vim v10, v10, 1, v0
 ; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV32-NEXT:    vmv.v.i v11, 0
-; RV32-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; RV32-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; RV32-NEXT:    vslideup.vi v11, v10, 0
 ; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV32-NEXT:    vmsne.vi v10, v11, 0
@@ -460,7 +458,7 @@ define void @mscatter_v2i32_align2(<2 x i32> %val, <2 x i32*> %ptrs, <2 x i1> %m
 ; RV64-NEXT:    vmerge.vim v10, v10, 1, v0
 ; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV64-NEXT:    vmv.v.i v11, 0
-; RV64-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; RV64-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; RV64-NEXT:    vslideup.vi v11, v10, 0
 ; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV64-NEXT:    vmsne.vi v10, v11, 0
@@ -514,7 +512,7 @@ define void @masked_load_v2i32_align1(<2 x i32>* %a, <2 x i32> %m, <2 x i32>* %r
 ; RV32-NEXT:    vmerge.vim v8, v8, 1, v0
 ; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV32-NEXT:    vmv.v.i v9, 0
-; RV32-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; RV32-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; RV32-NEXT:    vslideup.vi v9, v8, 0
 ; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV32-NEXT:    vmsne.vi v8, v9, 0
@@ -556,10 +554,8 @@ define void @masked_load_v2i32_align1(<2 x i32>* %a, <2 x i32> %m, <2 x i32>* %r
 ; RV32-NEXT:    slli a0, a0, 16
 ; RV32-NEXT:    or a0, a0, a2
 ; RV32-NEXT:    vmv.s.x v9, a0
-; RV32-NEXT:    vsetvli zero, zero, e32, mf2, tu, mu
 ; RV32-NEXT:    vslideup.vi v8, v9, 1
 ; RV32-NEXT:  .LBB8_4: # %else2
-; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV32-NEXT:    vse32.v v8, (a1)
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -574,7 +570,7 @@ define void @masked_load_v2i32_align1(<2 x i32>* %a, <2 x i32> %m, <2 x i32>* %r
 ; RV64-NEXT:    vmerge.vim v8, v8, 1, v0
 ; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV64-NEXT:    vmv.v.i v9, 0
-; RV64-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; RV64-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; RV64-NEXT:    vslideup.vi v9, v8, 0
 ; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV64-NEXT:    vmsne.vi v8, v9, 0
@@ -616,10 +612,8 @@ define void @masked_load_v2i32_align1(<2 x i32>* %a, <2 x i32> %m, <2 x i32>* %r
 ; RV64-NEXT:    slli a0, a0, 16
 ; RV64-NEXT:    or a0, a0, a2
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, zero, e32, mf2, tu, mu
 ; RV64-NEXT:    vslideup.vi v8, v9, 1
 ; RV64-NEXT:  .LBB8_4: # %else2
-; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV64-NEXT:    vse32.v v8, (a1)
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
@@ -642,7 +636,7 @@ define void @masked_store_v2i32_align2(<2 x i32> %val, <2 x i32>* %a, <2 x i32>
 ; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v10, 0
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v10, v9, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v9, v10, 0

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
index 56c1da15c6384..5cfa1e4c4ff76 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
@@ -311,7 +311,7 @@ define <32 x i8> @vpgather_baseidx_v32i8(i8* %base, <32 x i8> %idxs, <32 x i1> %
 ; RV64-NEXT:    vmv1r.v v0, v10
 ; RV64-NEXT:    vluxei64.v v8, (a0), v16, v0.t
 ; RV64-NEXT:    li a0, 32
-; RV64-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
+; RV64-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; RV64-NEXT:    vslideup.vi v8, v12, 16
 ; RV64-NEXT:    ret
   %ptrs = getelementptr inbounds i8, i8* %base, <32 x i8> %idxs

diff --git a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
index d9b01b739ca1c..4a3acb9acfb04 100644
--- a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
@@ -61,7 +61,7 @@ define <vscale x 4 x i8> @insert_nxv1i8_nxv4i8_0(<vscale x 4 x i8> %vec, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 4 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, <vscale x 1 x i8> %subvec, i64 0)
@@ -76,7 +76,7 @@ define <vscale x 4 x i8> @insert_nxv1i8_nxv4i8_3(<vscale x 4 x i8> %vec, <vscale
 ; CHECK-NEXT:    slli a1, a0, 1
 ; CHECK-NEXT:    add a1, a1, a0
 ; CHECK-NEXT:    add a0, a1, a0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %v = call <vscale x 4 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, <vscale x 1 x i8> %subvec, i64 3)
@@ -214,7 +214,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv1i32_0(<vscale x 16 x i32> %vec,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v16, 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 1 x i32> %subvec, i64 0)
@@ -227,7 +227,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv1i32_1(<vscale x 16 x i32> %vec,
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 3
 ; CHECK-NEXT:    add a1, a0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 1 x i32> %subvec, i64 1)
@@ -239,7 +239,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv1i32_6(<vscale x 16 x i32> %vec,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v11, v16, 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 1 x i32> %subvec, i64 6)
@@ -251,7 +251,7 @@ define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_0(<vscale x 16 x i8> %vec, <vsc
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 0)
@@ -264,7 +264,7 @@ define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_1(<vscale x 16 x i8> %vec, <vsc
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 3
 ; CHECK-NEXT:    add a1, a0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 1)
@@ -278,7 +278,7 @@ define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_2(<vscale x 16 x i8> %vec, <vsc
 ; CHECK-NEXT:    srli a1, a0, 3
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    add a1, a0, a1
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 2)
@@ -293,7 +293,7 @@ define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_3(<vscale x 16 x i8> %vec, <vsc
 ; CHECK-NEXT:    slli a1, a0, 1
 ; CHECK-NEXT:    add a1, a1, a0
 ; CHECK-NEXT:    add a0, a1, a0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a1
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 3)
@@ -307,7 +307,7 @@ define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_7(<vscale x 16 x i8> %vec, <vsc
 ; CHECK-NEXT:    srli a0, a0, 3
 ; CHECK-NEXT:    slli a1, a0, 3
 ; CHECK-NEXT:    sub a0, a1, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 7)
@@ -321,7 +321,7 @@ define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_15(<vscale x 16 x i8> %vec, <vs
 ; CHECK-NEXT:    srli a0, a0, 3
 ; CHECK-NEXT:    slli a1, a0, 3
 ; CHECK-NEXT:    sub a0, a1, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v9, v10, a0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 15)
@@ -333,7 +333,7 @@ define <vscale x 32 x half> @insert_nxv32f16_nxv2f16_0(<vscale x 32 x half> %vec
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v16, 0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv2f16.nxv32f16(<vscale x 32 x half> %vec, <vscale x 2 x half> %subvec, i64 0)
@@ -346,7 +346,7 @@ define <vscale x 32 x half> @insert_nxv32f16_nxv2f16_2(<vscale x 32 x half> %vec
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    add a1, a0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv2f16.nxv32f16(<vscale x 32 x half> %vec, <vscale x 2 x half> %subvec, i64 2)
@@ -359,7 +359,7 @@ define <vscale x 32 x half> @insert_nxv32f16_nxv2f16_26(<vscale x 32 x half> %ve
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    add a1, a0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v14, v16, a0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv2f16.nxv32f16(<vscale x 32 x half> %vec, <vscale x 2 x half> %subvec, i64 26)
@@ -395,7 +395,7 @@ define <vscale x 32 x i1> @insert_nxv32i1_nxv8i1_0(<vscale x 32 x i1> %v, <vscal
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v0, v8, 0
 ; CHECK-NEXT:    ret
   %vec = call <vscale x 32 x i1> @llvm.experimental.vector.insert.nxv8i1.nxv32i1(<vscale x 32 x i1> %v, <vscale x 8 x i1> %sv, i64 0)
@@ -408,7 +408,7 @@ define <vscale x 32 x i1> @insert_nxv32i1_nxv8i1_8(<vscale x 32 x i1> %v, <vscal
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 3
 ; CHECK-NEXT:    add a1, a0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v0, v8, a0
 ; CHECK-NEXT:    ret
   %vec = call <vscale x 32 x i1> @llvm.experimental.vector.insert.nxv8i1.nxv32i1(<vscale x 32 x i1> %v, <vscale x 8 x i1> %sv, i64 8)
@@ -427,7 +427,7 @@ define <vscale x 4 x i1> @insert_nxv4i1_nxv1i1_0(<vscale x 4 x i1> %v, <vscale x
 ; CHECK-NEXT:    vmerge.vim v8, v10, 1, v0
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 0
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
@@ -450,7 +450,7 @@ define <vscale x 4 x i1> @insert_nxv4i1_nxv1i1_2(<vscale x 4 x i1> %v, <vscale x
 ; CHECK-NEXT:    vmv.v.i v10, 0
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vmerge.vim v8, v10, 1, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v9, v8, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0

diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv32.ll
index 0c466174fd455..b58534140c458 100644
--- a/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv32.ll
@@ -17,7 +17,7 @@ define <vscale x 1 x half> @insertelt_nxv1f16_imm(<vscale x 1 x half> %v, half %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf4, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x half> %v, half %elt, i32 3
@@ -30,7 +30,7 @@ define <vscale x 1 x half> @insertelt_nxv1f16_idx(<vscale x 1 x half> %v, half %
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x half> %v, half %elt, i32 %idx
@@ -52,7 +52,7 @@ define <vscale x 2 x half> @insertelt_nxv2f16_imm(<vscale x 2 x half> %v, half %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x half> %v, half %elt, i32 3
@@ -65,7 +65,7 @@ define <vscale x 2 x half> @insertelt_nxv2f16_idx(<vscale x 2 x half> %v, half %
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x half> %v, half %elt, i32 %idx
@@ -87,7 +87,7 @@ define <vscale x 4 x half> @insertelt_nxv4f16_imm(<vscale x 4 x half> %v, half %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x half> %v, half %elt, i32 3
@@ -100,7 +100,7 @@ define <vscale x 4 x half> @insertelt_nxv4f16_idx(<vscale x 4 x half> %v, half %
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x half> %v, half %elt, i32 %idx
@@ -122,7 +122,7 @@ define <vscale x 8 x half> @insertelt_nxv8f16_imm(<vscale x 8 x half> %v, half %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v10, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e16, m2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x half> %v, half %elt, i32 3
@@ -135,7 +135,7 @@ define <vscale x 8 x half> @insertelt_nxv8f16_idx(<vscale x 8 x half> %v, half %
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v10, fa0
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x half> %v, half %elt, i32 %idx
@@ -157,7 +157,7 @@ define <vscale x 16 x half> @insertelt_nxv16f16_imm(<vscale x 16 x half> %v, hal
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v12, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e16, m4, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x half> %v, half %elt, i32 3
@@ -170,7 +170,7 @@ define <vscale x 16 x half> @insertelt_nxv16f16_idx(<vscale x 16 x half> %v, hal
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v12, fa0
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x half> %v, half %elt, i32 %idx
@@ -192,7 +192,7 @@ define <vscale x 32 x half> @insertelt_nxv32f16_imm(<vscale x 32 x half> %v, hal
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v16, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e16, m8, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v16, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 32 x half> %v, half %elt, i32 3
@@ -205,7 +205,7 @@ define <vscale x 32 x half> @insertelt_nxv32f16_idx(<vscale x 32 x half> %v, hal
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v16, fa0
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 32 x half> %v, half %elt, i32 %idx
@@ -227,7 +227,7 @@ define <vscale x 1 x float> @insertelt_nxv1f32_imm(<vscale x 1 x float> %v, floa
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e32, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x float> %v, float %elt, i32 3
@@ -240,7 +240,7 @@ define <vscale x 1 x float> @insertelt_nxv1f32_idx(<vscale x 1 x float> %v, floa
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x float> %v, float %elt, i32 %idx
@@ -262,7 +262,7 @@ define <vscale x 2 x float> @insertelt_nxv2f32_imm(<vscale x 2 x float> %v, floa
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x float> %v, float %elt, i32 3
@@ -275,7 +275,7 @@ define <vscale x 2 x float> @insertelt_nxv2f32_idx(<vscale x 2 x float> %v, floa
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x float> %v, float %elt, i32 %idx
@@ -297,7 +297,7 @@ define <vscale x 4 x float> @insertelt_nxv4f32_imm(<vscale x 4 x float> %v, floa
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v10, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e32, m2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x float> %v, float %elt, i32 3
@@ -310,7 +310,7 @@ define <vscale x 4 x float> @insertelt_nxv4f32_idx(<vscale x 4 x float> %v, floa
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v10, fa0
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x float> %v, float %elt, i32 %idx
@@ -332,7 +332,7 @@ define <vscale x 8 x float> @insertelt_nxv8f32_imm(<vscale x 8 x float> %v, floa
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v12, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e32, m4, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x float> %v, float %elt, i32 3
@@ -345,7 +345,7 @@ define <vscale x 8 x float> @insertelt_nxv8f32_idx(<vscale x 8 x float> %v, floa
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v12, fa0
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x float> %v, float %elt, i32 %idx
@@ -367,7 +367,7 @@ define <vscale x 16 x float> @insertelt_nxv16f32_imm(<vscale x 16 x float> %v, f
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v16, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e32, m8, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v16, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x float> %v, float %elt, i32 3
@@ -380,7 +380,7 @@ define <vscale x 16 x float> @insertelt_nxv16f32_idx(<vscale x 16 x float> %v, f
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v16, fa0
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x float> %v, float %elt, i32 %idx
@@ -402,7 +402,7 @@ define <vscale x 1 x double> @insertelt_nxv1f64_imm(<vscale x 1 x double> %v, do
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e64, m1, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e64, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x double> %v, double %elt, i32 3
@@ -415,7 +415,7 @@ define <vscale x 1 x double> @insertelt_nxv1f64_idx(<vscale x 1 x double> %v, do
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x double> %v, double %elt, i32 %idx
@@ -437,7 +437,7 @@ define <vscale x 2 x double> @insertelt_nxv2f64_imm(<vscale x 2 x double> %v, do
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v10, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x double> %v, double %elt, i32 3
@@ -450,7 +450,7 @@ define <vscale x 2 x double> @insertelt_nxv2f64_idx(<vscale x 2 x double> %v, do
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v10, fa0
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x double> %v, double %elt, i32 %idx
@@ -472,7 +472,7 @@ define <vscale x 4 x double> @insertelt_nxv4f64_imm(<vscale x 4 x double> %v, do
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v12, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e64, m4, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e64, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x double> %v, double %elt, i32 3
@@ -485,7 +485,7 @@ define <vscale x 4 x double> @insertelt_nxv4f64_idx(<vscale x 4 x double> %v, do
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v12, fa0
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x double> %v, double %elt, i32 %idx
@@ -507,7 +507,7 @@ define <vscale x 8 x double> @insertelt_nxv8f64_imm(<vscale x 8 x double> %v, do
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v16, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e64, m8, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e64, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v16, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x double> %v, double %elt, i32 3
@@ -520,7 +520,7 @@ define <vscale x 8 x double> @insertelt_nxv8f64_idx(<vscale x 8 x double> %v, do
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v16, fa0
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x double> %v, double %elt, i32 %idx

diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv64.ll
index ebf7f42983628..5c447941d003f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv64.ll
@@ -17,7 +17,7 @@ define <vscale x 1 x half> @insertelt_nxv1f16_imm(<vscale x 1 x half> %v, half %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf4, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x half> %v, half %elt, i32 3
@@ -30,7 +30,7 @@ define <vscale x 1 x half> @insertelt_nxv1f16_idx(<vscale x 1 x half> %v, half %
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x half> %v, half %elt, i32 %idx
@@ -52,7 +52,7 @@ define <vscale x 2 x half> @insertelt_nxv2f16_imm(<vscale x 2 x half> %v, half %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x half> %v, half %elt, i32 3
@@ -65,7 +65,7 @@ define <vscale x 2 x half> @insertelt_nxv2f16_idx(<vscale x 2 x half> %v, half %
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x half> %v, half %elt, i32 %idx
@@ -87,7 +87,7 @@ define <vscale x 4 x half> @insertelt_nxv4f16_imm(<vscale x 4 x half> %v, half %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x half> %v, half %elt, i32 3
@@ -100,7 +100,7 @@ define <vscale x 4 x half> @insertelt_nxv4f16_idx(<vscale x 4 x half> %v, half %
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x half> %v, half %elt, i32 %idx
@@ -122,7 +122,7 @@ define <vscale x 8 x half> @insertelt_nxv8f16_imm(<vscale x 8 x half> %v, half %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v10, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e16, m2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x half> %v, half %elt, i32 3
@@ -135,7 +135,7 @@ define <vscale x 8 x half> @insertelt_nxv8f16_idx(<vscale x 8 x half> %v, half %
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v10, fa0
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x half> %v, half %elt, i32 %idx
@@ -157,7 +157,7 @@ define <vscale x 16 x half> @insertelt_nxv16f16_imm(<vscale x 16 x half> %v, hal
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v12, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e16, m4, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x half> %v, half %elt, i32 3
@@ -170,7 +170,7 @@ define <vscale x 16 x half> @insertelt_nxv16f16_idx(<vscale x 16 x half> %v, hal
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v12, fa0
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x half> %v, half %elt, i32 %idx
@@ -192,7 +192,7 @@ define <vscale x 32 x half> @insertelt_nxv32f16_imm(<vscale x 32 x half> %v, hal
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v16, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e16, m8, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v16, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 32 x half> %v, half %elt, i32 3
@@ -205,7 +205,7 @@ define <vscale x 32 x half> @insertelt_nxv32f16_idx(<vscale x 32 x half> %v, hal
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v16, fa0
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 32 x half> %v, half %elt, i32 %idx
@@ -227,7 +227,7 @@ define <vscale x 1 x float> @insertelt_nxv1f32_imm(<vscale x 1 x float> %v, floa
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e32, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x float> %v, float %elt, i32 3
@@ -240,7 +240,7 @@ define <vscale x 1 x float> @insertelt_nxv1f32_idx(<vscale x 1 x float> %v, floa
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x float> %v, float %elt, i32 %idx
@@ -262,7 +262,7 @@ define <vscale x 2 x float> @insertelt_nxv2f32_imm(<vscale x 2 x float> %v, floa
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x float> %v, float %elt, i32 3
@@ -275,7 +275,7 @@ define <vscale x 2 x float> @insertelt_nxv2f32_idx(<vscale x 2 x float> %v, floa
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x float> %v, float %elt, i32 %idx
@@ -297,7 +297,7 @@ define <vscale x 4 x float> @insertelt_nxv4f32_imm(<vscale x 4 x float> %v, floa
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v10, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e32, m2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x float> %v, float %elt, i32 3
@@ -310,7 +310,7 @@ define <vscale x 4 x float> @insertelt_nxv4f32_idx(<vscale x 4 x float> %v, floa
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v10, fa0
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x float> %v, float %elt, i32 %idx
@@ -332,7 +332,7 @@ define <vscale x 8 x float> @insertelt_nxv8f32_imm(<vscale x 8 x float> %v, floa
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v12, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e32, m4, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x float> %v, float %elt, i32 3
@@ -345,7 +345,7 @@ define <vscale x 8 x float> @insertelt_nxv8f32_idx(<vscale x 8 x float> %v, floa
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v12, fa0
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x float> %v, float %elt, i32 %idx
@@ -367,7 +367,7 @@ define <vscale x 16 x float> @insertelt_nxv16f32_imm(<vscale x 16 x float> %v, f
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v16, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e32, m8, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v16, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x float> %v, float %elt, i32 3
@@ -380,7 +380,7 @@ define <vscale x 16 x float> @insertelt_nxv16f32_idx(<vscale x 16 x float> %v, f
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v16, fa0
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x float> %v, float %elt, i32 %idx
@@ -402,7 +402,7 @@ define <vscale x 1 x double> @insertelt_nxv1f64_imm(<vscale x 1 x double> %v, do
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e64, m1, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e64, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x double> %v, double %elt, i32 3
@@ -415,7 +415,7 @@ define <vscale x 1 x double> @insertelt_nxv1f64_idx(<vscale x 1 x double> %v, do
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x double> %v, double %elt, i32 %idx
@@ -437,7 +437,7 @@ define <vscale x 2 x double> @insertelt_nxv2f64_imm(<vscale x 2 x double> %v, do
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v10, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x double> %v, double %elt, i32 3
@@ -450,7 +450,7 @@ define <vscale x 2 x double> @insertelt_nxv2f64_idx(<vscale x 2 x double> %v, do
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v10, fa0
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x double> %v, double %elt, i32 %idx
@@ -472,7 +472,7 @@ define <vscale x 4 x double> @insertelt_nxv4f64_imm(<vscale x 4 x double> %v, do
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v12, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e64, m4, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e64, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x double> %v, double %elt, i32 3
@@ -485,7 +485,7 @@ define <vscale x 4 x double> @insertelt_nxv4f64_idx(<vscale x 4 x double> %v, do
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v12, fa0
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x double> %v, double %elt, i32 %idx
@@ -507,7 +507,7 @@ define <vscale x 8 x double> @insertelt_nxv8f64_imm(<vscale x 8 x double> %v, do
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v16, fa0
-; CHECK-NEXT:    vsetivli zero, 4, e64, m8, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e64, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v16, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x double> %v, double %elt, i32 3
@@ -520,7 +520,7 @@ define <vscale x 8 x double> @insertelt_nxv8f64_idx(<vscale x 8 x double> %v, do
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v16, fa0
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x double> %v, double %elt, i32 %idx

diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll
index da0f5c06c63ac..e8511e163304e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll
@@ -9,7 +9,7 @@ define <vscale x 1 x i1> @insertelt_nxv1i1(<vscale x 1 x i1> %x, i1 %elt) {
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vmv.v.i v9, 0
 ; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT:    vsetivli zero, 3, e8, mf8, tu, mu
+; CHECK-NEXT:    vsetivli zero, 3, e8, mf8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 2
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vand.vi v8, v9, 1
@@ -27,7 +27,7 @@ define <vscale x 1 x i1> @insertelt_idx_nxv1i1(<vscale x 1 x i1> %x, i1 %elt, i6
 ; CHECK-NEXT:    vmv.v.i v9, 0
 ; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v9, v8, a1
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vand.vi v8, v9, 1
@@ -44,7 +44,7 @@ define <vscale x 2 x i1> @insertelt_nxv2i1(<vscale x 2 x i1> %x, i1 %elt) {
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vmv.v.i v9, 0
 ; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT:    vsetivli zero, 3, e8, mf4, tu, mu
+; CHECK-NEXT:    vsetivli zero, 3, e8, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 2
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vand.vi v8, v9, 1
@@ -62,7 +62,7 @@ define <vscale x 2 x i1> @insertelt_idx_nxv2i1(<vscale x 2 x i1> %x, i1 %elt, i6
 ; CHECK-NEXT:    vmv.v.i v9, 0
 ; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v9, v8, a1
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vand.vi v8, v9, 1
@@ -79,7 +79,7 @@ define <vscale x 4 x i1> @insertelt_nxv4i1(<vscale x 4 x i1> %x, i1 %elt) {
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vmv.v.i v9, 0
 ; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT:    vsetivli zero, 3, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 3, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 2
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vand.vi v8, v9, 1
@@ -97,7 +97,7 @@ define <vscale x 4 x i1> @insertelt_idx_nxv4i1(<vscale x 4 x i1> %x, i1 %elt, i6
 ; CHECK-NEXT:    vmv.v.i v9, 0
 ; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v9, v8, a1
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vand.vi v8, v9, 1
@@ -114,7 +114,7 @@ define <vscale x 8 x i1> @insertelt_nxv8i1(<vscale x 8 x i1> %x, i1 %elt) {
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vmv.v.i v9, 0
 ; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT:    vsetivli zero, 3, e8, m1, tu, mu
+; CHECK-NEXT:    vsetivli zero, 3, e8, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 2
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vand.vi v8, v9, 1
@@ -132,7 +132,7 @@ define <vscale x 8 x i1> @insertelt_idx_nxv8i1(<vscale x 8 x i1> %x, i1 %elt, i6
 ; CHECK-NEXT:    vmv.v.i v9, 0
 ; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v9, v8, a1
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vand.vi v8, v9, 1
@@ -149,7 +149,7 @@ define <vscale x 16 x i1> @insertelt_nxv16i1(<vscale x 16 x i1> %x, i1 %elt) {
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vmv.v.i v10, 0
 ; CHECK-NEXT:    vmerge.vim v10, v10, 1, v0
-; CHECK-NEXT:    vsetivli zero, 3, e8, m2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 3, e8, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v10, v8, 2
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vand.vi v8, v10, 1
@@ -167,7 +167,7 @@ define <vscale x 16 x i1> @insertelt_idx_nxv16i1(<vscale x 16 x i1> %x, i1 %elt,
 ; CHECK-NEXT:    vmv.v.i v10, 0
 ; CHECK-NEXT:    vmerge.vim v10, v10, 1, v0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v10, v8, a1
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vand.vi v8, v10, 1
@@ -184,7 +184,7 @@ define <vscale x 32 x i1> @insertelt_nxv32i1(<vscale x 32 x i1> %x, i1 %elt) {
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vmv.v.i v12, 0
 ; CHECK-NEXT:    vmerge.vim v12, v12, 1, v0
-; CHECK-NEXT:    vsetivli zero, 3, e8, m4, tu, mu
+; CHECK-NEXT:    vsetivli zero, 3, e8, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v12, v8, 2
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vand.vi v8, v12, 1
@@ -202,7 +202,7 @@ define <vscale x 32 x i1> @insertelt_idx_nxv32i1(<vscale x 32 x i1> %x, i1 %elt,
 ; CHECK-NEXT:    vmv.v.i v12, 0
 ; CHECK-NEXT:    vmerge.vim v12, v12, 1, v0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v12, v8, a1
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vand.vi v8, v12, 1
@@ -219,7 +219,7 @@ define <vscale x 64 x i1> @insertelt_nxv64i1(<vscale x 64 x i1> %x, i1 %elt) {
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vmv.v.i v16, 0
 ; CHECK-NEXT:    vmerge.vim v16, v16, 1, v0
-; CHECK-NEXT:    vsetivli zero, 3, e8, m8, tu, mu
+; CHECK-NEXT:    vsetivli zero, 3, e8, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v16, v8, 2
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vand.vi v8, v16, 1
@@ -237,7 +237,7 @@ define <vscale x 64 x i1> @insertelt_idx_nxv64i1(<vscale x 64 x i1> %x, i1 %elt,
 ; CHECK-NEXT:    vmv.v.i v16, 0
 ; CHECK-NEXT:    vmerge.vim v16, v16, 1, v0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v16, v8, a1
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vand.vi v8, v16, 1

diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll
index 47c72c836c8ed..7e3f2b89d11d4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll
@@ -17,7 +17,7 @@ define <vscale x 1 x i8> @insertelt_nxv1i8_imm(<vscale x 1 x i8> %v, i8 signext
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf8, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x i8> %v, i8 %elt, i32 3
@@ -30,7 +30,7 @@ define <vscale x 1 x i8> @insertelt_nxv1i8_idx(<vscale x 1 x i8> %v, i8 signext
 ; CHECK-NEXT:    vsetvli a2, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x i8> %v, i8 %elt, i32 %idx
@@ -52,7 +52,7 @@ define <vscale x 2 x i8> @insertelt_nxv2i8_imm(<vscale x 2 x i8> %v, i8 signext
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i8> %v, i8 %elt, i32 3
@@ -65,7 +65,7 @@ define <vscale x 2 x i8> @insertelt_nxv2i8_idx(<vscale x 2 x i8> %v, i8 signext
 ; CHECK-NEXT:    vsetvli a2, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i8> %v, i8 %elt, i32 %idx
@@ -87,7 +87,7 @@ define <vscale x 4 x i8> @insertelt_nxv4i8_imm(<vscale x 4 x i8> %v, i8 signext
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x i8> %v, i8 %elt, i32 3
@@ -100,7 +100,7 @@ define <vscale x 4 x i8> @insertelt_nxv4i8_idx(<vscale x 4 x i8> %v, i8 signext
 ; CHECK-NEXT:    vsetvli a2, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x i8> %v, i8 %elt, i32 %idx
@@ -122,7 +122,7 @@ define <vscale x 8 x i8> @insertelt_nxv8i8_imm(<vscale x 8 x i8> %v, i8 signext
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetivli zero, 4, e8, m1, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i8> %v, i8 %elt, i32 3
@@ -135,7 +135,7 @@ define <vscale x 8 x i8> @insertelt_nxv8i8_idx(<vscale x 8 x i8> %v, i8 signext
 ; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i8> %v, i8 %elt, i32 %idx
@@ -157,7 +157,7 @@ define <vscale x 16 x i8> @insertelt_nxv16i8_imm(<vscale x 16 x i8> %v, i8 signe
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v10, a0
-; CHECK-NEXT:    vsetivli zero, 4, e8, m2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x i8> %v, i8 %elt, i32 3
@@ -170,7 +170,7 @@ define <vscale x 16 x i8> @insertelt_nxv16i8_idx(<vscale x 16 x i8> %v, i8 signe
 ; CHECK-NEXT:    vsetvli a2, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v10, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x i8> %v, i8 %elt, i32 %idx
@@ -192,7 +192,7 @@ define <vscale x 32 x i8> @insertelt_nxv32i8_imm(<vscale x 32 x i8> %v, i8 signe
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vmv.s.x v12, a0
-; CHECK-NEXT:    vsetivli zero, 4, e8, m4, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 32 x i8> %v, i8 %elt, i32 3
@@ -205,7 +205,7 @@ define <vscale x 32 x i8> @insertelt_nxv32i8_idx(<vscale x 32 x i8> %v, i8 signe
 ; CHECK-NEXT:    vsetvli a2, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vmv.s.x v12, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 32 x i8> %v, i8 %elt, i32 %idx
@@ -227,7 +227,7 @@ define <vscale x 64 x i8> @insertelt_nxv64i8_imm(<vscale x 64 x i8> %v, i8 signe
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v16, a0
-; CHECK-NEXT:    vsetivli zero, 4, e8, m8, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v16, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 64 x i8> %v, i8 %elt, i32 3
@@ -240,7 +240,7 @@ define <vscale x 64 x i8> @insertelt_nxv64i8_idx(<vscale x 64 x i8> %v, i8 signe
 ; CHECK-NEXT:    vsetvli a2, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v16, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v16, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 64 x i8> %v, i8 %elt, i32 %idx
@@ -262,7 +262,7 @@ define <vscale x 1 x i16> @insertelt_nxv1i16_imm(<vscale x 1 x i16> %v, i16 sign
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf4, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x i16> %v, i16 %elt, i32 3
@@ -275,7 +275,7 @@ define <vscale x 1 x i16> @insertelt_nxv1i16_idx(<vscale x 1 x i16> %v, i16 sign
 ; CHECK-NEXT:    vsetvli a2, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x i16> %v, i16 %elt, i32 %idx
@@ -297,7 +297,7 @@ define <vscale x 2 x i16> @insertelt_nxv2i16_imm(<vscale x 2 x i16> %v, i16 sign
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i16> %v, i16 %elt, i32 3
@@ -310,7 +310,7 @@ define <vscale x 2 x i16> @insertelt_nxv2i16_idx(<vscale x 2 x i16> %v, i16 sign
 ; CHECK-NEXT:    vsetvli a2, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i16> %v, i16 %elt, i32 %idx
@@ -332,7 +332,7 @@ define <vscale x 4 x i16> @insertelt_nxv4i16_imm(<vscale x 4 x i16> %v, i16 sign
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x i16> %v, i16 %elt, i32 3
@@ -345,7 +345,7 @@ define <vscale x 4 x i16> @insertelt_nxv4i16_idx(<vscale x 4 x i16> %v, i16 sign
 ; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x i16> %v, i16 %elt, i32 %idx
@@ -367,7 +367,7 @@ define <vscale x 8 x i16> @insertelt_nxv8i16_imm(<vscale x 8 x i16> %v, i16 sign
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v10, a0
-; CHECK-NEXT:    vsetivli zero, 4, e16, m2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i16> %v, i16 %elt, i32 3
@@ -380,7 +380,7 @@ define <vscale x 8 x i16> @insertelt_nxv8i16_idx(<vscale x 8 x i16> %v, i16 sign
 ; CHECK-NEXT:    vsetvli a2, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v10, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i16> %v, i16 %elt, i32 %idx
@@ -402,7 +402,7 @@ define <vscale x 16 x i16> @insertelt_nxv16i16_imm(<vscale x 16 x i16> %v, i16 s
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vmv.s.x v12, a0
-; CHECK-NEXT:    vsetivli zero, 4, e16, m4, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x i16> %v, i16 %elt, i32 3
@@ -415,7 +415,7 @@ define <vscale x 16 x i16> @insertelt_nxv16i16_idx(<vscale x 16 x i16> %v, i16 s
 ; CHECK-NEXT:    vsetvli a2, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vmv.s.x v12, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x i16> %v, i16 %elt, i32 %idx
@@ -437,7 +437,7 @@ define <vscale x 32 x i16> @insertelt_nxv32i16_imm(<vscale x 32 x i16> %v, i16 s
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v16, a0
-; CHECK-NEXT:    vsetivli zero, 4, e16, m8, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v16, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 32 x i16> %v, i16 %elt, i32 3
@@ -450,7 +450,7 @@ define <vscale x 32 x i16> @insertelt_nxv32i16_idx(<vscale x 32 x i16> %v, i16 s
 ; CHECK-NEXT:    vsetvli a2, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v16, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v16, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 32 x i16> %v, i16 %elt, i32 %idx
@@ -472,7 +472,7 @@ define <vscale x 1 x i32> @insertelt_nxv1i32_imm(<vscale x 1 x i32> %v, i32 %elt
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetivli zero, 4, e32, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x i32> %v, i32 %elt, i32 3
@@ -485,7 +485,7 @@ define <vscale x 1 x i32> @insertelt_nxv1i32_idx(<vscale x 1 x i32> %v, i32 %elt
 ; CHECK-NEXT:    vsetvli a2, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x i32> %v, i32 %elt, i32 %idx
@@ -507,7 +507,7 @@ define <vscale x 2 x i32> @insertelt_nxv2i32_imm(<vscale x 2 x i32> %v, i32 %elt
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i32> %v, i32 %elt, i32 3
@@ -520,7 +520,7 @@ define <vscale x 2 x i32> @insertelt_nxv2i32_idx(<vscale x 2 x i32> %v, i32 %elt
 ; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i32> %v, i32 %elt, i32 %idx
@@ -542,7 +542,7 @@ define <vscale x 4 x i32> @insertelt_nxv4i32_imm(<vscale x 4 x i32> %v, i32 %elt
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v10, a0
-; CHECK-NEXT:    vsetivli zero, 4, e32, m2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x i32> %v, i32 %elt, i32 3
@@ -555,7 +555,7 @@ define <vscale x 4 x i32> @insertelt_nxv4i32_idx(<vscale x 4 x i32> %v, i32 %elt
 ; CHECK-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v10, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x i32> %v, i32 %elt, i32 %idx
@@ -577,7 +577,7 @@ define <vscale x 8 x i32> @insertelt_nxv8i32_imm(<vscale x 8 x i32> %v, i32 %elt
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmv.s.x v12, a0
-; CHECK-NEXT:    vsetivli zero, 4, e32, m4, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i32> %v, i32 %elt, i32 3
@@ -590,7 +590,7 @@ define <vscale x 8 x i32> @insertelt_nxv8i32_idx(<vscale x 8 x i32> %v, i32 %elt
 ; CHECK-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmv.s.x v12, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i32> %v, i32 %elt, i32 %idx
@@ -612,7 +612,7 @@ define <vscale x 16 x i32> @insertelt_nxv16i32_imm(<vscale x 16 x i32> %v, i32 %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v16, a0
-; CHECK-NEXT:    vsetivli zero, 4, e32, m8, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v16, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x i32> %v, i32 %elt, i32 3
@@ -625,7 +625,7 @@ define <vscale x 16 x i32> @insertelt_nxv16i32_idx(<vscale x 16 x i32> %v, i32 %
 ; CHECK-NEXT:    vsetvli a2, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v16, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v16, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x i32> %v, i32 %elt, i32 %idx
@@ -639,7 +639,7 @@ define <vscale x 1 x i64> @insertelt_nxv1i64_0(<vscale x 1 x i64> %v, i64 %elt)
 ; CHECK-NEXT:    vmv.v.i v9, 0
 ; CHECK-NEXT:    vslide1up.vx v10, v9, a1
 ; CHECK-NEXT:    vslide1up.vx v9, v10, a0
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, tu, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x i64> %v, i64 %elt, i32 0
@@ -653,7 +653,7 @@ define <vscale x 1 x i64> @insertelt_nxv1i64_imm(<vscale x 1 x i64> %v, i64 %elt
 ; CHECK-NEXT:    vmv.v.i v9, 0
 ; CHECK-NEXT:    vslide1up.vx v10, v9, a1
 ; CHECK-NEXT:    vslide1up.vx v9, v10, a0
-; CHECK-NEXT:    vsetivli zero, 4, e64, m1, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e64, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x i64> %v, i64 %elt, i32 3
@@ -668,7 +668,7 @@ define <vscale x 1 x i64> @insertelt_nxv1i64_idx(<vscale x 1 x i64> %v, i64 %elt
 ; CHECK-NEXT:    vslide1up.vx v10, v9, a1
 ; CHECK-NEXT:    vslide1up.vx v9, v10, a0
 ; CHECK-NEXT:    addi a0, a2, 1
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a2
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x i64> %v, i64 %elt, i32 %idx
@@ -682,7 +682,7 @@ define <vscale x 2 x i64> @insertelt_nxv2i64_0(<vscale x 2 x i64> %v, i64 %elt)
 ; CHECK-NEXT:    vmv.v.i v10, 0
 ; CHECK-NEXT:    vslide1up.vx v12, v10, a1
 ; CHECK-NEXT:    vslide1up.vx v10, v12, a0
-; CHECK-NEXT:    vsetivli zero, 1, e64, m2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i64> %v, i64 %elt, i32 0
@@ -696,7 +696,7 @@ define <vscale x 2 x i64> @insertelt_nxv2i64_imm(<vscale x 2 x i64> %v, i64 %elt
 ; CHECK-NEXT:    vmv.v.i v10, 0
 ; CHECK-NEXT:    vslide1up.vx v12, v10, a1
 ; CHECK-NEXT:    vslide1up.vx v10, v12, a0
-; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i64> %v, i64 %elt, i32 3
@@ -711,7 +711,7 @@ define <vscale x 2 x i64> @insertelt_nxv2i64_idx(<vscale x 2 x i64> %v, i64 %elt
 ; CHECK-NEXT:    vslide1up.vx v12, v10, a1
 ; CHECK-NEXT:    vslide1up.vx v10, v12, a0
 ; CHECK-NEXT:    addi a0, a2, 1
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a2
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i64> %v, i64 %elt, i32 %idx
@@ -725,7 +725,7 @@ define <vscale x 4 x i64> @insertelt_nxv4i64_0(<vscale x 4 x i64> %v, i64 %elt)
 ; CHECK-NEXT:    vmv.v.i v12, 0
 ; CHECK-NEXT:    vslide1up.vx v16, v12, a1
 ; CHECK-NEXT:    vslide1up.vx v12, v16, a0
-; CHECK-NEXT:    vsetivli zero, 1, e64, m4, tu, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x i64> %v, i64 %elt, i32 0
@@ -739,7 +739,7 @@ define <vscale x 4 x i64> @insertelt_nxv4i64_imm(<vscale x 4 x i64> %v, i64 %elt
 ; CHECK-NEXT:    vmv.v.i v12, 0
 ; CHECK-NEXT:    vslide1up.vx v16, v12, a1
 ; CHECK-NEXT:    vslide1up.vx v12, v16, a0
-; CHECK-NEXT:    vsetivli zero, 4, e64, m4, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e64, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x i64> %v, i64 %elt, i32 3
@@ -754,7 +754,7 @@ define <vscale x 4 x i64> @insertelt_nxv4i64_idx(<vscale x 4 x i64> %v, i64 %elt
 ; CHECK-NEXT:    vslide1up.vx v16, v12, a1
 ; CHECK-NEXT:    vslide1up.vx v12, v16, a0
 ; CHECK-NEXT:    addi a0, a2, 1
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a2
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x i64> %v, i64 %elt, i32 %idx
@@ -768,7 +768,7 @@ define <vscale x 8 x i64> @insertelt_nxv8i64_0(<vscale x 8 x i64> %v, i64 %elt)
 ; CHECK-NEXT:    vmv.v.i v16, 0
 ; CHECK-NEXT:    vslide1up.vx v24, v16, a1
 ; CHECK-NEXT:    vslide1up.vx v16, v24, a0
-; CHECK-NEXT:    vsetivli zero, 1, e64, m8, tu, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v16, 0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i64> %v, i64 %elt, i32 0
@@ -782,7 +782,7 @@ define <vscale x 8 x i64> @insertelt_nxv8i64_imm(<vscale x 8 x i64> %v, i64 %elt
 ; CHECK-NEXT:    vmv.v.i v16, 0
 ; CHECK-NEXT:    vslide1up.vx v24, v16, a1
 ; CHECK-NEXT:    vslide1up.vx v16, v24, a0
-; CHECK-NEXT:    vsetivli zero, 4, e64, m8, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e64, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v16, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i64> %v, i64 %elt, i32 3
@@ -797,7 +797,7 @@ define <vscale x 8 x i64> @insertelt_nxv8i64_idx(<vscale x 8 x i64> %v, i64 %elt
 ; CHECK-NEXT:    vslide1up.vx v24, v16, a1
 ; CHECK-NEXT:    vslide1up.vx v16, v24, a0
 ; CHECK-NEXT:    addi a0, a2, 1
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v16, a2
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i64> %v, i64 %elt, i32 %idx
@@ -822,7 +822,7 @@ define <vscale x 2 x i64> @insertelt_nxv2i64_imm_c10(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    li a0, 10
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v10, a0
-; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i64> %v, i64 10, i32 3
@@ -836,7 +836,7 @@ define <vscale x 2 x i64> @insertelt_nxv2i64_idx_c10(<vscale x 2 x i64> %v, i32
 ; CHECK-NEXT:    vsetvli a2, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v10, a1
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i64> %v, i64 10, i32 %idx
@@ -860,7 +860,7 @@ define <vscale x 2 x i64> @insertelt_nxv2i64_imm_cn1(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    li a0, -1
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v10, a0
-; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i64> %v, i64 -1, i32 3
@@ -874,7 +874,7 @@ define <vscale x 2 x i64> @insertelt_nxv2i64_idx_cn1(<vscale x 2 x i64> %v, i32
 ; CHECK-NEXT:    vsetvli a2, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v10, a1
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i64> %v, i64 -1, i32 %idx

diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll
index a1a0065ee7a46..86351bc832e8c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll
@@ -17,7 +17,7 @@ define <vscale x 1 x i8> @insertelt_nxv1i8_imm(<vscale x 1 x i8> %v, i8 signext
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf8, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x i8> %v, i8 %elt, i32 3
@@ -30,7 +30,7 @@ define <vscale x 1 x i8> @insertelt_nxv1i8_idx(<vscale x 1 x i8> %v, i8 signext
 ; CHECK-NEXT:    vsetvli a2, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x i8> %v, i8 %elt, i32 %idx
@@ -52,7 +52,7 @@ define <vscale x 2 x i8> @insertelt_nxv2i8_imm(<vscale x 2 x i8> %v, i8 signext
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i8> %v, i8 %elt, i32 3
@@ -65,7 +65,7 @@ define <vscale x 2 x i8> @insertelt_nxv2i8_idx(<vscale x 2 x i8> %v, i8 signext
 ; CHECK-NEXT:    vsetvli a2, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i8> %v, i8 %elt, i32 %idx
@@ -87,7 +87,7 @@ define <vscale x 4 x i8> @insertelt_nxv4i8_imm(<vscale x 4 x i8> %v, i8 signext
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x i8> %v, i8 %elt, i32 3
@@ -100,7 +100,7 @@ define <vscale x 4 x i8> @insertelt_nxv4i8_idx(<vscale x 4 x i8> %v, i8 signext
 ; CHECK-NEXT:    vsetvli a2, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x i8> %v, i8 %elt, i32 %idx
@@ -122,7 +122,7 @@ define <vscale x 8 x i8> @insertelt_nxv8i8_imm(<vscale x 8 x i8> %v, i8 signext
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetivli zero, 4, e8, m1, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i8> %v, i8 %elt, i32 3
@@ -135,7 +135,7 @@ define <vscale x 8 x i8> @insertelt_nxv8i8_idx(<vscale x 8 x i8> %v, i8 signext
 ; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i8> %v, i8 %elt, i32 %idx
@@ -157,7 +157,7 @@ define <vscale x 16 x i8> @insertelt_nxv16i8_imm(<vscale x 16 x i8> %v, i8 signe
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v10, a0
-; CHECK-NEXT:    vsetivli zero, 4, e8, m2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x i8> %v, i8 %elt, i32 3
@@ -170,7 +170,7 @@ define <vscale x 16 x i8> @insertelt_nxv16i8_idx(<vscale x 16 x i8> %v, i8 signe
 ; CHECK-NEXT:    vsetvli a2, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v10, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x i8> %v, i8 %elt, i32 %idx
@@ -192,7 +192,7 @@ define <vscale x 32 x i8> @insertelt_nxv32i8_imm(<vscale x 32 x i8> %v, i8 signe
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vmv.s.x v12, a0
-; CHECK-NEXT:    vsetivli zero, 4, e8, m4, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 32 x i8> %v, i8 %elt, i32 3
@@ -205,7 +205,7 @@ define <vscale x 32 x i8> @insertelt_nxv32i8_idx(<vscale x 32 x i8> %v, i8 signe
 ; CHECK-NEXT:    vsetvli a2, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vmv.s.x v12, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 32 x i8> %v, i8 %elt, i32 %idx
@@ -227,7 +227,7 @@ define <vscale x 64 x i8> @insertelt_nxv64i8_imm(<vscale x 64 x i8> %v, i8 signe
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v16, a0
-; CHECK-NEXT:    vsetivli zero, 4, e8, m8, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e8, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v16, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 64 x i8> %v, i8 %elt, i32 3
@@ -240,7 +240,7 @@ define <vscale x 64 x i8> @insertelt_nxv64i8_idx(<vscale x 64 x i8> %v, i8 signe
 ; CHECK-NEXT:    vsetvli a2, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v16, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v16, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 64 x i8> %v, i8 %elt, i32 %idx
@@ -262,7 +262,7 @@ define <vscale x 1 x i16> @insertelt_nxv1i16_imm(<vscale x 1 x i16> %v, i16 sign
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf4, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x i16> %v, i16 %elt, i32 3
@@ -275,7 +275,7 @@ define <vscale x 1 x i16> @insertelt_nxv1i16_idx(<vscale x 1 x i16> %v, i16 sign
 ; CHECK-NEXT:    vsetvli a2, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x i16> %v, i16 %elt, i32 %idx
@@ -297,7 +297,7 @@ define <vscale x 2 x i16> @insertelt_nxv2i16_imm(<vscale x 2 x i16> %v, i16 sign
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i16> %v, i16 %elt, i32 3
@@ -310,7 +310,7 @@ define <vscale x 2 x i16> @insertelt_nxv2i16_idx(<vscale x 2 x i16> %v, i16 sign
 ; CHECK-NEXT:    vsetvli a2, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i16> %v, i16 %elt, i32 %idx
@@ -332,7 +332,7 @@ define <vscale x 4 x i16> @insertelt_nxv4i16_imm(<vscale x 4 x i16> %v, i16 sign
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x i16> %v, i16 %elt, i32 3
@@ -345,7 +345,7 @@ define <vscale x 4 x i16> @insertelt_nxv4i16_idx(<vscale x 4 x i16> %v, i16 sign
 ; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x i16> %v, i16 %elt, i32 %idx
@@ -367,7 +367,7 @@ define <vscale x 8 x i16> @insertelt_nxv8i16_imm(<vscale x 8 x i16> %v, i16 sign
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v10, a0
-; CHECK-NEXT:    vsetivli zero, 4, e16, m2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i16> %v, i16 %elt, i32 3
@@ -380,7 +380,7 @@ define <vscale x 8 x i16> @insertelt_nxv8i16_idx(<vscale x 8 x i16> %v, i16 sign
 ; CHECK-NEXT:    vsetvli a2, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v10, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i16> %v, i16 %elt, i32 %idx
@@ -402,7 +402,7 @@ define <vscale x 16 x i16> @insertelt_nxv16i16_imm(<vscale x 16 x i16> %v, i16 s
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vmv.s.x v12, a0
-; CHECK-NEXT:    vsetivli zero, 4, e16, m4, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x i16> %v, i16 %elt, i32 3
@@ -415,7 +415,7 @@ define <vscale x 16 x i16> @insertelt_nxv16i16_idx(<vscale x 16 x i16> %v, i16 s
 ; CHECK-NEXT:    vsetvli a2, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vmv.s.x v12, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x i16> %v, i16 %elt, i32 %idx
@@ -437,7 +437,7 @@ define <vscale x 32 x i16> @insertelt_nxv32i16_imm(<vscale x 32 x i16> %v, i16 s
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v16, a0
-; CHECK-NEXT:    vsetivli zero, 4, e16, m8, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v16, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 32 x i16> %v, i16 %elt, i32 3
@@ -450,7 +450,7 @@ define <vscale x 32 x i16> @insertelt_nxv32i16_idx(<vscale x 32 x i16> %v, i16 s
 ; CHECK-NEXT:    vsetvli a2, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v16, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v16, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 32 x i16> %v, i16 %elt, i32 %idx
@@ -472,7 +472,7 @@ define <vscale x 1 x i32> @insertelt_nxv1i32_imm(<vscale x 1 x i32> %v, i32 sign
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetivli zero, 4, e32, mf2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x i32> %v, i32 %elt, i32 3
@@ -485,7 +485,7 @@ define <vscale x 1 x i32> @insertelt_nxv1i32_idx(<vscale x 1 x i32> %v, i32 sign
 ; CHECK-NEXT:    vsetvli a2, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x i32> %v, i32 %elt, i32 %idx
@@ -507,7 +507,7 @@ define <vscale x 2 x i32> @insertelt_nxv2i32_imm(<vscale x 2 x i32> %v, i32 sign
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i32> %v, i32 %elt, i32 3
@@ -520,7 +520,7 @@ define <vscale x 2 x i32> @insertelt_nxv2i32_idx(<vscale x 2 x i32> %v, i32 sign
 ; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i32> %v, i32 %elt, i32 %idx
@@ -542,7 +542,7 @@ define <vscale x 4 x i32> @insertelt_nxv4i32_imm(<vscale x 4 x i32> %v, i32 sign
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v10, a0
-; CHECK-NEXT:    vsetivli zero, 4, e32, m2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x i32> %v, i32 %elt, i32 3
@@ -555,7 +555,7 @@ define <vscale x 4 x i32> @insertelt_nxv4i32_idx(<vscale x 4 x i32> %v, i32 sign
 ; CHECK-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v10, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x i32> %v, i32 %elt, i32 %idx
@@ -577,7 +577,7 @@ define <vscale x 8 x i32> @insertelt_nxv8i32_imm(<vscale x 8 x i32> %v, i32 sign
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmv.s.x v12, a0
-; CHECK-NEXT:    vsetivli zero, 4, e32, m4, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i32> %v, i32 %elt, i32 3
@@ -590,7 +590,7 @@ define <vscale x 8 x i32> @insertelt_nxv8i32_idx(<vscale x 8 x i32> %v, i32 sign
 ; CHECK-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmv.s.x v12, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i32> %v, i32 %elt, i32 %idx
@@ -612,7 +612,7 @@ define <vscale x 16 x i32> @insertelt_nxv16i32_imm(<vscale x 16 x i32> %v, i32 s
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v16, a0
-; CHECK-NEXT:    vsetivli zero, 4, e32, m8, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v16, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x i32> %v, i32 %elt, i32 3
@@ -625,7 +625,7 @@ define <vscale x 16 x i32> @insertelt_nxv16i32_idx(<vscale x 16 x i32> %v, i32 s
 ; CHECK-NEXT:    vsetvli a2, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v16, a0
 ; CHECK-NEXT:    addi a0, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v16, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x i32> %v, i32 %elt, i32 %idx
@@ -647,7 +647,7 @@ define <vscale x 1 x i64> @insertelt_nxv1i64_imm(<vscale x 1 x i64> %v, i64 %elt
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetivli zero, 4, e64, m1, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e64, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x i64> %v, i64 %elt, i32 3
@@ -661,7 +661,7 @@ define <vscale x 1 x i64> @insertelt_nxv1i64_idx(<vscale x 1 x i64> %v, i64 %elt
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    sext.w a0, a1
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x i64> %v, i64 %elt, i32 %idx
@@ -683,7 +683,7 @@ define <vscale x 2 x i64> @insertelt_nxv2i64_imm(<vscale x 2 x i64> %v, i64 %elt
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v10, a0
-; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i64> %v, i64 %elt, i32 3
@@ -697,7 +697,7 @@ define <vscale x 2 x i64> @insertelt_nxv2i64_idx(<vscale x 2 x i64> %v, i64 %elt
 ; CHECK-NEXT:    vmv.s.x v10, a0
 ; CHECK-NEXT:    sext.w a0, a1
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i64> %v, i64 %elt, i32 %idx
@@ -719,7 +719,7 @@ define <vscale x 4 x i64> @insertelt_nxv4i64_imm(<vscale x 4 x i64> %v, i64 %elt
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vmv.s.x v12, a0
-; CHECK-NEXT:    vsetivli zero, 4, e64, m4, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e64, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x i64> %v, i64 %elt, i32 3
@@ -733,7 +733,7 @@ define <vscale x 4 x i64> @insertelt_nxv4i64_idx(<vscale x 4 x i64> %v, i64 %elt
 ; CHECK-NEXT:    vmv.s.x v12, a0
 ; CHECK-NEXT:    sext.w a0, a1
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x i64> %v, i64 %elt, i32 %idx
@@ -755,7 +755,7 @@ define <vscale x 8 x i64> @insertelt_nxv8i64_imm(<vscale x 8 x i64> %v, i64 %elt
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v16, a0
-; CHECK-NEXT:    vsetivli zero, 4, e64, m8, tu, mu
+; CHECK-NEXT:    vsetivli zero, 4, e64, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v16, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i64> %v, i64 %elt, i32 3
@@ -769,7 +769,7 @@ define <vscale x 8 x i64> @insertelt_nxv8i64_idx(<vscale x 8 x i64> %v, i64 %elt
 ; CHECK-NEXT:    vmv.s.x v16, a0
 ; CHECK-NEXT:    sext.w a0, a1
 ; CHECK-NEXT:    addi a1, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i64> %v, i64 %elt, i32 %idx

diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
index f2e738751bd55..deac5354f126a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
@@ -1146,20 +1146,19 @@ declare <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
   <vscale x 1 x i1>,
   iXLen, iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, iXLen %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vslideup.vx v9, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
-    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
-    iXLen %1,
-    <vscale x 1 x i1> %2,
-    iXLen %3, iXLen 3)
+    <vscale x 1 x i8> %1,
+    iXLen %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 3)
 
   ret <vscale x 1 x i8> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
index b243ecc5f4b0d..b5093fc6cb1c6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
@@ -2521,7 +2521,7 @@ define <vscale x 16 x i1> @fcmp_oeq_vf_nx16f64(<vscale x 16 x double> %va) {
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    srli a0, a0, 3
 ; RV32-NEXT:    add a1, a0, a0
-; RV32-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
+; RV32-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; RV32-NEXT:    vslideup.vx v0, v24, a0
 ; RV32-NEXT:    ret
 ;
@@ -2534,7 +2534,7 @@ define <vscale x 16 x i1> @fcmp_oeq_vf_nx16f64(<vscale x 16 x double> %va) {
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    srli a0, a0, 3
 ; RV64-NEXT:    add a1, a0, a0
-; RV64-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
+; RV64-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; RV64-NEXT:    vslideup.vx v0, v24, a0
 ; RV64-NEXT:    ret
   %vc = fcmp oeq <vscale x 16 x double> %va, zeroinitializer

diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll
index a4731700861d2..9db0674187c19 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll
@@ -3201,7 +3201,7 @@ define <vscale x 16 x i1> @icmp_eq_vi_nx16i64(<vscale x 16 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmseq.vi v24, v16, 0
 ; CHECK-NEXT:    vmseq.vi v0, v8, 0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v0, v24, a0
 ; CHECK-NEXT:    ret
   %vc = icmp eq <vscale x 16 x i64> %va, zeroinitializer

diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll
index c7844882c37c1..8fbfa64cc99f6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll
@@ -867,6 +867,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   iXLen,
+  iXLen,
   iXLen);
 
 define <vscale x 1 x i8> @intrinsic_vslidedown_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, iXLen %1, iXLen %2) nounwind {
@@ -880,7 +881,8 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     iXLen %1,
-    iXLen %2)
+    iXLen %2,
+    iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -889,21 +891,22 @@ declare <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   iXLen,
+  iXLen,
   iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vslideup_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vslideup_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vslideup.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
-    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
-    iXLen %1,
-    iXLen %2)
+    <vscale x 1 x i8> %1,
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
index 9e3a3c9cd8349..1a32a4e6d0aa1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
@@ -21,7 +21,7 @@ define <vscale x 1 x i8> @splice_nxv1i8_offset_negone(<vscale x 1 x i8> %a, <vsc
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x i8> @llvm.experimental.vector.splice.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, i32 -1)
@@ -36,7 +36,7 @@ define <vscale x 1 x i8> @splice_nxv1i8_offset_min(<vscale x 1 x i8> %a, <vscale
 ; CHECK-NEXT:    addi a0, a0, -2
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 2
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x i8> @llvm.experimental.vector.splice.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, i32 -2)
@@ -51,7 +51,7 @@ define <vscale x 1 x i8> @splice_nxv1i8_offset_max(<vscale x 1 x i8> %a, <vscale
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 1
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x i8> @llvm.experimental.vector.splice.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, i32 1)
@@ -76,7 +76,7 @@ define <vscale x 2 x i8> @splice_nxv2i8_offset_negone(<vscale x 2 x i8> %a, <vsc
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i8> @llvm.experimental.vector.splice.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, i32 -1)
@@ -91,7 +91,7 @@ define <vscale x 2 x i8> @splice_nxv2i8_offset_min(<vscale x 2 x i8> %a, <vscale
 ; CHECK-NEXT:    addi a0, a0, -4
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 4
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i8> @llvm.experimental.vector.splice.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, i32 -4)
@@ -106,7 +106,7 @@ define <vscale x 2 x i8> @splice_nxv2i8_offset_max(<vscale x 2 x i8> %a, <vscale
 ; CHECK-NEXT:    addi a0, a0, -3
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 3
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i8> @llvm.experimental.vector.splice.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, i32 3)
@@ -131,7 +131,7 @@ define <vscale x 4 x i8> @splice_nxv4i8_offset_negone(<vscale x 4 x i8> %a, <vsc
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i8> @llvm.experimental.vector.splice.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, i32 -1)
@@ -146,7 +146,7 @@ define <vscale x 4 x i8> @splice_nxv4i8_offset_min(<vscale x 4 x i8> %a, <vscale
 ; CHECK-NEXT:    addi a0, a0, -8
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 8
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i8> @llvm.experimental.vector.splice.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, i32 -8)
@@ -161,7 +161,7 @@ define <vscale x 4 x i8> @splice_nxv4i8_offset_max(<vscale x 4 x i8> %a, <vscale
 ; CHECK-NEXT:    addi a0, a0, -7
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 7
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i8> @llvm.experimental.vector.splice.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, i32 7)
@@ -185,7 +185,7 @@ define <vscale x 8 x i8> @splice_nxv8i8_offset_negone(<vscale x 8 x i8> %a, <vsc
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, m1, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i8> @llvm.experimental.vector.splice.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, i32 -1)
@@ -199,7 +199,7 @@ define <vscale x 8 x i8> @splice_nxv8i8_offset_min(<vscale x 8 x i8> %a, <vscale
 ; CHECK-NEXT:    addi a0, a0, -16
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, m1, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 16
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i8> @llvm.experimental.vector.splice.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, i32 -16)
@@ -213,7 +213,7 @@ define <vscale x 8 x i8> @splice_nxv8i8_offset_max(<vscale x 8 x i8> %a, <vscale
 ; CHECK-NEXT:    addi a0, a0, -15
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 15
-; CHECK-NEXT:    vsetvli a1, zero, e8, m1, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i8> @llvm.experimental.vector.splice.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, i32 15)
@@ -238,7 +238,7 @@ define <vscale x 16 x i8> @splice_nxv16i8_offset_negone(<vscale x 16 x i8> %a, <
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e8, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, m2, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i8> @llvm.experimental.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 -1)
@@ -254,7 +254,7 @@ define <vscale x 16 x i8> @splice_nxv16i8_offset_min(<vscale x 16 x i8> %a, <vsc
 ; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, m2, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i8> @llvm.experimental.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 -32)
@@ -269,7 +269,7 @@ define <vscale x 16 x i8> @splice_nxv16i8_offset_max(<vscale x 16 x i8> %a, <vsc
 ; CHECK-NEXT:    addi a0, a0, -31
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 31
-; CHECK-NEXT:    vsetvli a1, zero, e8, m2, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i8> @llvm.experimental.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 31)
@@ -294,7 +294,7 @@ define <vscale x 32 x i8> @splice_nxv32i8_offset_negone(<vscale x 32 x i8> %a, <
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e8, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, m4, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 32 x i8> @llvm.experimental.vector.splice.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, i32 -1)
@@ -310,7 +310,7 @@ define <vscale x 32 x i8> @splice_nxv32i8_offset_min(<vscale x 32 x i8> %a, <vsc
 ; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, m4, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 32 x i8> @llvm.experimental.vector.splice.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, i32 -64)
@@ -326,7 +326,7 @@ define <vscale x 32 x i8> @splice_nxv32i8_offset_max(<vscale x 32 x i8> %a, <vsc
 ; CHECK-NEXT:    li a1, 63
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a1
-; CHECK-NEXT:    vsetvli a1, zero, e8, m4, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 32 x i8> @llvm.experimental.vector.splice.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, i32 63)
@@ -351,7 +351,7 @@ define <vscale x 64 x i8> @splice_nxv64i8_offset_negone(<vscale x 64 x i8> %a, <
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e8, m8, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, m8, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v16, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 64 x i8> @llvm.experimental.vector.splice.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, i32 -1)
@@ -367,7 +367,7 @@ define <vscale x 64 x i8> @splice_nxv64i8_offset_min(<vscale x 64 x i8> %a, <vsc
 ; CHECK-NEXT:    li a1, 128
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, m8, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v16, a1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 64 x i8> @llvm.experimental.vector.splice.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, i32 -128)
@@ -383,7 +383,7 @@ define <vscale x 64 x i8> @splice_nxv64i8_offset_max(<vscale x 64 x i8> %a, <vsc
 ; CHECK-NEXT:    li a1, 127
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a1
-; CHECK-NEXT:    vsetvli a1, zero, e8, m8, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 64 x i8> @llvm.experimental.vector.splice.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, i32 127)
@@ -408,7 +408,7 @@ define <vscale x 1 x i16> @splice_nxv1i16_offset_negone(<vscale x 1 x i16> %a, <
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x i16> @llvm.experimental.vector.splice.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, i32 -1)
@@ -423,7 +423,7 @@ define <vscale x 1 x i16> @splice_nxv1i16_offset_min(<vscale x 1 x i16> %a, <vsc
 ; CHECK-NEXT:    addi a0, a0, -2
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 2
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x i16> @llvm.experimental.vector.splice.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, i32 -2)
@@ -438,7 +438,7 @@ define <vscale x 1 x i16> @splice_nxv1i16_offset_max(<vscale x 1 x i16> %a, <vsc
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 1
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x i16> @llvm.experimental.vector.splice.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, i32 1)
@@ -463,7 +463,7 @@ define <vscale x 2 x i16> @splice_nxv2i16_offset_negone(<vscale x 2 x i16> %a, <
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i16> @llvm.experimental.vector.splice.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, i32 -1)
@@ -478,7 +478,7 @@ define <vscale x 2 x i16> @splice_nxv2i16_offset_min(<vscale x 2 x i16> %a, <vsc
 ; CHECK-NEXT:    addi a0, a0, -4
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 4
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i16> @llvm.experimental.vector.splice.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, i32 -4)
@@ -493,7 +493,7 @@ define <vscale x 2 x i16> @splice_nxv2i16_offset_max(<vscale x 2 x i16> %a, <vsc
 ; CHECK-NEXT:    addi a0, a0, -3
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 3
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i16> @llvm.experimental.vector.splice.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, i32 3)
@@ -518,7 +518,7 @@ define <vscale x 4 x i16> @splice_nxv4i16_offset_negone(<vscale x 4 x i16> %a, <
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i16> @llvm.experimental.vector.splice.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, i32 -1)
@@ -533,7 +533,7 @@ define <vscale x 4 x i16> @splice_nxv4i16_offset_min(<vscale x 4 x i16> %a, <vsc
 ; CHECK-NEXT:    addi a0, a0, -8
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 8
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i16> @llvm.experimental.vector.splice.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, i32 -8)
@@ -548,7 +548,7 @@ define <vscale x 4 x i16> @splice_nxv4i16_offset_max(<vscale x 4 x i16> %a, <vsc
 ; CHECK-NEXT:    addi a0, a0, -7
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 7
-; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i16> @llvm.experimental.vector.splice.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, i32 7)
@@ -572,7 +572,7 @@ define <vscale x 8 x i16> @splice_nxv8i16_offset_negone(<vscale x 8 x i16> %a, <
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i16> @llvm.experimental.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 -1)
@@ -586,7 +586,7 @@ define <vscale x 8 x i16> @splice_nxv8i16_offset_min(<vscale x 8 x i16> %a, <vsc
 ; CHECK-NEXT:    addi a0, a0, -16
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 16
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i16> @llvm.experimental.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 -16)
@@ -600,7 +600,7 @@ define <vscale x 8 x i16> @splice_nxv8i16_offset_max(<vscale x 8 x i16> %a, <vsc
 ; CHECK-NEXT:    addi a0, a0, -15
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 15
-; CHECK-NEXT:    vsetvli a1, zero, e16, m2, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i16> @llvm.experimental.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 15)
@@ -625,7 +625,7 @@ define <vscale x 16 x i16> @splice_nxv16i16_offset_negone(<vscale x 16 x i16> %a
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i16> @llvm.experimental.vector.splice.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, i32 -1)
@@ -641,7 +641,7 @@ define <vscale x 16 x i16> @splice_nxv16i16_offset_min(<vscale x 16 x i16> %a, <
 ; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i16> @llvm.experimental.vector.splice.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, i32 -32)
@@ -656,7 +656,7 @@ define <vscale x 16 x i16> @splice_nxv16i16_offset_max(<vscale x 16 x i16> %a, <
 ; CHECK-NEXT:    addi a0, a0, -31
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 31
-; CHECK-NEXT:    vsetvli a1, zero, e16, m4, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i16> @llvm.experimental.vector.splice.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, i32 31)
@@ -681,7 +681,7 @@ define <vscale x 32 x i16> @splice_nxv32i16_offset_negone(<vscale x 32 x i16> %a
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m8, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m8, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v16, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 32 x i16> @llvm.experimental.vector.splice.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, i32 -1)
@@ -697,7 +697,7 @@ define <vscale x 32 x i16> @splice_nxv32i16_offset_min(<vscale x 32 x i16> %a, <
 ; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m8, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v16, a1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 32 x i16> @llvm.experimental.vector.splice.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, i32 -64)
@@ -713,7 +713,7 @@ define <vscale x 32 x i16> @splice_nxv32i16_offset_max(<vscale x 32 x i16> %a, <
 ; CHECK-NEXT:    li a1, 63
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a1
-; CHECK-NEXT:    vsetvli a1, zero, e16, m8, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 32 x i16> @llvm.experimental.vector.splice.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, i32 63)
@@ -738,7 +738,7 @@ define <vscale x 1 x i32> @splice_nxv1i32_offset_negone(<vscale x 1 x i32> %a, <
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x i32> @llvm.experimental.vector.splice.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, i32 -1)
@@ -753,7 +753,7 @@ define <vscale x 1 x i32> @splice_nxv1i32_offset_min(<vscale x 1 x i32> %a, <vsc
 ; CHECK-NEXT:    addi a0, a0, -2
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 2
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x i32> @llvm.experimental.vector.splice.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, i32 -2)
@@ -768,7 +768,7 @@ define <vscale x 1 x i32> @splice_nxv1i32_offset_max(<vscale x 1 x i32> %a, <vsc
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 1
-; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x i32> @llvm.experimental.vector.splice.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, i32 1)
@@ -793,7 +793,7 @@ define <vscale x 2 x i32> @splice_nxv2i32_offset_negone(<vscale x 2 x i32> %a, <
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i32> @llvm.experimental.vector.splice.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, i32 -1)
@@ -808,7 +808,7 @@ define <vscale x 2 x i32> @splice_nxv2i32_offset_min(<vscale x 2 x i32> %a, <vsc
 ; CHECK-NEXT:    addi a0, a0, -4
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 4
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i32> @llvm.experimental.vector.splice.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, i32 -4)
@@ -823,7 +823,7 @@ define <vscale x 2 x i32> @splice_nxv2i32_offset_max(<vscale x 2 x i32> %a, <vsc
 ; CHECK-NEXT:    addi a0, a0, -3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 3
-; CHECK-NEXT:    vsetvli a1, zero, e32, m1, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i32> @llvm.experimental.vector.splice.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, i32 3)
@@ -848,7 +848,7 @@ define <vscale x 4 x i32> @splice_nxv4i32_offset_negone(<vscale x 4 x i32> %a, <
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 -1)
@@ -863,7 +863,7 @@ define <vscale x 4 x i32> @splice_nxv4i32_offset_min(<vscale x 4 x i32> %a, <vsc
 ; CHECK-NEXT:    addi a0, a0, -8
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 8
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 -8)
@@ -878,7 +878,7 @@ define <vscale x 4 x i32> @splice_nxv4i32_offset_max(<vscale x 4 x i32> %a, <vsc
 ; CHECK-NEXT:    addi a0, a0, -7
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 7
-; CHECK-NEXT:    vsetvli a1, zero, e32, m2, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 7)
@@ -902,7 +902,7 @@ define <vscale x 8 x i32> @splice_nxv8i32_offset_negone(<vscale x 8 x i32> %a, <
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m4, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i32> @llvm.experimental.vector.splice.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, i32 -1)
@@ -916,7 +916,7 @@ define <vscale x 8 x i32> @splice_nxv8i32_offset_min(<vscale x 8 x i32> %a, <vsc
 ; CHECK-NEXT:    addi a0, a0, -16
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m4, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 16
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i32> @llvm.experimental.vector.splice.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, i32 -16)
@@ -930,7 +930,7 @@ define <vscale x 8 x i32> @splice_nxv8i32_offset_max(<vscale x 8 x i32> %a, <vsc
 ; CHECK-NEXT:    addi a0, a0, -15
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 15
-; CHECK-NEXT:    vsetvli a1, zero, e32, m4, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i32> @llvm.experimental.vector.splice.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, i32 15)
@@ -955,7 +955,7 @@ define <vscale x 16 x i32> @splice_nxv16i32_offset_negone(<vscale x 16 x i32> %a
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m8, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m8, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v16, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i32> @llvm.experimental.vector.splice.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, i32 -1)
@@ -971,7 +971,7 @@ define <vscale x 16 x i32> @splice_nxv16i32_offset_min(<vscale x 16 x i32> %a, <
 ; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m8, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v16, a1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i32> @llvm.experimental.vector.splice.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, i32 -32)
@@ -986,7 +986,7 @@ define <vscale x 16 x i32> @splice_nxv16i32_offset_max(<vscale x 16 x i32> %a, <
 ; CHECK-NEXT:    addi a0, a0, -31
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 31
-; CHECK-NEXT:    vsetvli a1, zero, e32, m8, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i32> @llvm.experimental.vector.splice.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, i32 31)
@@ -1011,7 +1011,7 @@ define <vscale x 1 x i64> @splice_nxv1i64_offset_negone(<vscale x 1 x i64> %a, <
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x i64> @llvm.experimental.vector.splice.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, i32 -1)
@@ -1026,7 +1026,7 @@ define <vscale x 1 x i64> @splice_nxv1i64_offset_min(<vscale x 1 x i64> %a, <vsc
 ; CHECK-NEXT:    addi a0, a0, -2
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 2
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x i64> @llvm.experimental.vector.splice.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, i32 -2)
@@ -1041,7 +1041,7 @@ define <vscale x 1 x i64> @splice_nxv1i64_offset_max(<vscale x 1 x i64> %a, <vsc
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 1
-; CHECK-NEXT:    vsetvli a1, zero, e64, m1, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x i64> @llvm.experimental.vector.splice.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, i32 1)
@@ -1066,7 +1066,7 @@ define <vscale x 2 x i64> @splice_nxv2i64_offset_negone(<vscale x 2 x i64> %a, <
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i64> @llvm.experimental.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 -1)
@@ -1081,7 +1081,7 @@ define <vscale x 2 x i64> @splice_nxv2i64_offset_min(<vscale x 2 x i64> %a, <vsc
 ; CHECK-NEXT:    addi a0, a0, -4
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 4
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i64> @llvm.experimental.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 -4)
@@ -1096,7 +1096,7 @@ define <vscale x 2 x i64> @splice_nxv2i64_offset_max(<vscale x 2 x i64> %a, <vsc
 ; CHECK-NEXT:    addi a0, a0, -3
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 3
-; CHECK-NEXT:    vsetvli a1, zero, e64, m2, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i64> @llvm.experimental.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 3)
@@ -1121,7 +1121,7 @@ define <vscale x 4 x i64> @splice_nxv4i64_offset_negone(<vscale x 4 x i64> %a, <
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i64> @llvm.experimental.vector.splice.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, i32 -1)
@@ -1136,7 +1136,7 @@ define <vscale x 4 x i64> @splice_nxv4i64_offset_min(<vscale x 4 x i64> %a, <vsc
 ; CHECK-NEXT:    addi a0, a0, -8
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 8
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i64> @llvm.experimental.vector.splice.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, i32 -8)
@@ -1151,7 +1151,7 @@ define <vscale x 4 x i64> @splice_nxv4i64_offset_max(<vscale x 4 x i64> %a, <vsc
 ; CHECK-NEXT:    addi a0, a0, -7
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 7
-; CHECK-NEXT:    vsetvli a1, zero, e64, m4, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i64> @llvm.experimental.vector.splice.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, i32 7)
@@ -1175,7 +1175,7 @@ define <vscale x 8 x i64> @splice_nxv8i64_offset_negone(<vscale x 8 x i64> %a, <
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m8, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v16, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i64> @llvm.experimental.vector.splice.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, i32 -1)
@@ -1189,7 +1189,7 @@ define <vscale x 8 x i64> @splice_nxv8i64_offset_min(<vscale x 8 x i64> %a, <vsc
 ; CHECK-NEXT:    addi a0, a0, -16
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v16, 16
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i64> @llvm.experimental.vector.splice.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, i32 -16)
@@ -1203,7 +1203,7 @@ define <vscale x 8 x i64> @splice_nxv8i64_offset_max(<vscale x 8 x i64> %a, <vsc
 ; CHECK-NEXT:    addi a0, a0, -15
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 15
-; CHECK-NEXT:    vsetvli a1, zero, e64, m8, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i64> @llvm.experimental.vector.splice.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, i32 15)
@@ -1228,7 +1228,7 @@ define <vscale x 1 x half> @splice_nxv1f16_offset_negone(<vscale x 1 x half> %a,
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x half> @llvm.experimental.vector.splice.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %b, i32 -1)
@@ -1243,7 +1243,7 @@ define <vscale x 1 x half> @splice_nxv1f16_offset_min(<vscale x 1 x half> %a, <v
 ; CHECK-NEXT:    addi a0, a0, -2
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 2
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x half> @llvm.experimental.vector.splice.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %b, i32 -2)
@@ -1258,7 +1258,7 @@ define <vscale x 1 x half> @splice_nxv1f16_offset_max(<vscale x 1 x half> %a, <v
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 1
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x half> @llvm.experimental.vector.splice.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %b, i32 1)
@@ -1283,7 +1283,7 @@ define <vscale x 2 x half> @splice_nxv2f16_offset_negone(<vscale x 2 x half> %a,
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x half> @llvm.experimental.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 -1)
@@ -1298,7 +1298,7 @@ define <vscale x 2 x half> @splice_nxv2f16_offset_min(<vscale x 2 x half> %a, <v
 ; CHECK-NEXT:    addi a0, a0, -4
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 4
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x half> @llvm.experimental.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 -4)
@@ -1313,7 +1313,7 @@ define <vscale x 2 x half> @splice_nxv2f16_offset_max(<vscale x 2 x half> %a, <v
 ; CHECK-NEXT:    addi a0, a0, -3
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 3
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x half> @llvm.experimental.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 3)
@@ -1338,7 +1338,7 @@ define <vscale x 4 x half> @splice_nxv4f16_offset_negone(<vscale x 4 x half> %a,
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x half> @llvm.experimental.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 -1)
@@ -1353,7 +1353,7 @@ define <vscale x 4 x half> @splice_nxv4f16_offset_min(<vscale x 4 x half> %a, <v
 ; CHECK-NEXT:    addi a0, a0, -8
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 8
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x half> @llvm.experimental.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 -8)
@@ -1368,7 +1368,7 @@ define <vscale x 4 x half> @splice_nxv4f16_offset_max(<vscale x 4 x half> %a, <v
 ; CHECK-NEXT:    addi a0, a0, -7
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 7
-; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x half> @llvm.experimental.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 7)
@@ -1392,7 +1392,7 @@ define <vscale x 8 x half> @splice_nxv8f16_offset_negone(<vscale x 8 x half> %a,
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x half> @llvm.experimental.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 -1)
@@ -1406,7 +1406,7 @@ define <vscale x 8 x half> @splice_nxv8f16_offset_min(<vscale x 8 x half> %a, <v
 ; CHECK-NEXT:    addi a0, a0, -16
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 16
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x half> @llvm.experimental.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 -16)
@@ -1420,7 +1420,7 @@ define <vscale x 8 x half> @splice_nxv8f16_offset_max(<vscale x 8 x half> %a, <v
 ; CHECK-NEXT:    addi a0, a0, -15
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 15
-; CHECK-NEXT:    vsetvli a1, zero, e16, m2, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x half> @llvm.experimental.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 15)
@@ -1445,7 +1445,7 @@ define <vscale x 16 x half> @splice_nxv16f16_offset_negone(<vscale x 16 x half>
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x half> @llvm.experimental.vector.splice.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b, i32 -1)
@@ -1461,7 +1461,7 @@ define <vscale x 16 x half> @splice_nxv16f16_offset_min(<vscale x 16 x half> %a,
 ; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x half> @llvm.experimental.vector.splice.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b, i32 -32)
@@ -1476,7 +1476,7 @@ define <vscale x 16 x half> @splice_nxv16f16_offset_max(<vscale x 16 x half> %a,
 ; CHECK-NEXT:    addi a0, a0, -31
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 31
-; CHECK-NEXT:    vsetvli a1, zero, e16, m4, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x half> @llvm.experimental.vector.splice.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b, i32 31)
@@ -1501,7 +1501,7 @@ define <vscale x 32 x half> @splice_nxv32f16_offset_negone(<vscale x 32 x half>
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m8, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m8, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v16, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 32 x half> @llvm.experimental.vector.splice.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b, i32 -1)
@@ -1517,7 +1517,7 @@ define <vscale x 32 x half> @splice_nxv32f16_offset_min(<vscale x 32 x half> %a,
 ; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m8, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v16, a1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 32 x half> @llvm.experimental.vector.splice.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b, i32 -64)
@@ -1533,7 +1533,7 @@ define <vscale x 32 x half> @splice_nxv32f16_offset_max(<vscale x 32 x half> %a,
 ; CHECK-NEXT:    li a1, 63
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a1
-; CHECK-NEXT:    vsetvli a1, zero, e16, m8, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 32 x half> @llvm.experimental.vector.splice.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b, i32 63)
@@ -1558,7 +1558,7 @@ define <vscale x 1 x float> @splice_nxv1f32_offset_negone(<vscale x 1 x float> %
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x float> @llvm.experimental.vector.splice.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %b, i32 -1)
@@ -1573,7 +1573,7 @@ define <vscale x 1 x float> @splice_nxv1f32_offset_min(<vscale x 1 x float> %a,
 ; CHECK-NEXT:    addi a0, a0, -2
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 2
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x float> @llvm.experimental.vector.splice.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %b, i32 -2)
@@ -1588,7 +1588,7 @@ define <vscale x 1 x float> @splice_nxv1f32_offset_max(<vscale x 1 x float> %a,
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 1
-; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x float> @llvm.experimental.vector.splice.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %b, i32 1)
@@ -1613,7 +1613,7 @@ define <vscale x 2 x float> @splice_nxv2f32_offset_negone(<vscale x 2 x float> %
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x float> @llvm.experimental.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 -1)
@@ -1628,7 +1628,7 @@ define <vscale x 2 x float> @splice_nxv2f32_offset_min(<vscale x 2 x float> %a,
 ; CHECK-NEXT:    addi a0, a0, -4
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 4
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x float> @llvm.experimental.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 -4)
@@ -1643,7 +1643,7 @@ define <vscale x 2 x float> @splice_nxv2f32_offset_max(<vscale x 2 x float> %a,
 ; CHECK-NEXT:    addi a0, a0, -3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 3
-; CHECK-NEXT:    vsetvli a1, zero, e32, m1, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x float> @llvm.experimental.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 3)
@@ -1668,7 +1668,7 @@ define <vscale x 4 x float> @splice_nxv4f32_offset_negone(<vscale x 4 x float> %
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x float> @llvm.experimental.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 -1)
@@ -1683,7 +1683,7 @@ define <vscale x 4 x float> @splice_nxv4f32_offset_min(<vscale x 4 x float> %a,
 ; CHECK-NEXT:    addi a0, a0, -8
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 8
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x float> @llvm.experimental.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 -8)
@@ -1698,7 +1698,7 @@ define <vscale x 4 x float> @splice_nxv4f32_offset_max(<vscale x 4 x float> %a,
 ; CHECK-NEXT:    addi a0, a0, -7
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 7
-; CHECK-NEXT:    vsetvli a1, zero, e32, m2, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x float> @llvm.experimental.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 7)
@@ -1722,7 +1722,7 @@ define <vscale x 8 x float> @splice_nxv8f32_offset_negone(<vscale x 8 x float> %
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m4, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x float> @llvm.experimental.vector.splice.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b, i32 -1)
@@ -1736,7 +1736,7 @@ define <vscale x 8 x float> @splice_nxv8f32_offset_min(<vscale x 8 x float> %a,
 ; CHECK-NEXT:    addi a0, a0, -16
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m4, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 16
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x float> @llvm.experimental.vector.splice.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b, i32 -16)
@@ -1750,7 +1750,7 @@ define <vscale x 8 x float> @splice_nxv8f32_offset_max(<vscale x 8 x float> %a,
 ; CHECK-NEXT:    addi a0, a0, -15
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 15
-; CHECK-NEXT:    vsetvli a1, zero, e32, m4, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x float> @llvm.experimental.vector.splice.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b, i32 15)
@@ -1775,7 +1775,7 @@ define <vscale x 16 x float> @splice_nxv16f32_offset_negone(<vscale x 16 x float
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m8, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m8, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v16, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x float> @llvm.experimental.vector.splice.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b, i32 -1)
@@ -1791,7 +1791,7 @@ define <vscale x 16 x float> @splice_nxv16f32_offset_min(<vscale x 16 x float> %
 ; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m8, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v16, a1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x float> @llvm.experimental.vector.splice.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b, i32 -32)
@@ -1806,7 +1806,7 @@ define <vscale x 16 x float> @splice_nxv16f32_offset_max(<vscale x 16 x float> %
 ; CHECK-NEXT:    addi a0, a0, -31
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 31
-; CHECK-NEXT:    vsetvli a1, zero, e32, m8, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x float> @llvm.experimental.vector.splice.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b, i32 31)
@@ -1831,7 +1831,7 @@ define <vscale x 1 x double> @splice_nxv1f64_offset_negone(<vscale x 1 x double>
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x double> @llvm.experimental.vector.splice.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i32 -1)
@@ -1846,7 +1846,7 @@ define <vscale x 1 x double> @splice_nxv1f64_offset_min(<vscale x 1 x double> %a
 ; CHECK-NEXT:    addi a0, a0, -2
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 2
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x double> @llvm.experimental.vector.splice.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i32 -2)
@@ -1861,7 +1861,7 @@ define <vscale x 1 x double> @splice_nxv1f64_offset_max(<vscale x 1 x double> %a
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 1
-; CHECK-NEXT:    vsetvli a1, zero, e64, m1, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x double> @llvm.experimental.vector.splice.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i32 1)
@@ -1886,7 +1886,7 @@ define <vscale x 2 x double> @splice_nxv2f64_offset_negone(<vscale x 2 x double>
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x double> @llvm.experimental.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 -1)
@@ -1901,7 +1901,7 @@ define <vscale x 2 x double> @splice_nxv2f64_offset_min(<vscale x 2 x double> %a
 ; CHECK-NEXT:    addi a0, a0, -4
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 4
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x double> @llvm.experimental.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 -4)
@@ -1916,7 +1916,7 @@ define <vscale x 2 x double> @splice_nxv2f64_offset_max(<vscale x 2 x double> %a
 ; CHECK-NEXT:    addi a0, a0, -3
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 3
-; CHECK-NEXT:    vsetvli a1, zero, e64, m2, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x double> @llvm.experimental.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 3)
@@ -1941,7 +1941,7 @@ define <vscale x 4 x double> @splice_nxv4f64_offset_negone(<vscale x 4 x double>
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x double> @llvm.experimental.vector.splice.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b, i32 -1)
@@ -1956,7 +1956,7 @@ define <vscale x 4 x double> @splice_nxv4f64_offset_min(<vscale x 4 x double> %a
 ; CHECK-NEXT:    addi a0, a0, -8
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 8
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x double> @llvm.experimental.vector.splice.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b, i32 -8)
@@ -1971,7 +1971,7 @@ define <vscale x 4 x double> @splice_nxv4f64_offset_max(<vscale x 4 x double> %a
 ; CHECK-NEXT:    addi a0, a0, -7
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 7
-; CHECK-NEXT:    vsetvli a1, zero, e64, m4, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x double> @llvm.experimental.vector.splice.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b, i32 7)
@@ -1995,7 +1995,7 @@ define <vscale x 8 x double> @splice_nxv8f64_offset_negone(<vscale x 8 x double>
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m8, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v16, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x double> @llvm.experimental.vector.splice.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b, i32 -1)
@@ -2009,7 +2009,7 @@ define <vscale x 8 x double> @splice_nxv8f64_offset_min(<vscale x 8 x double> %a
 ; CHECK-NEXT:    addi a0, a0, -16
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, tu, mu
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v16, 16
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x double> @llvm.experimental.vector.splice.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b, i32 -16)
@@ -2023,7 +2023,7 @@ define <vscale x 8 x double> @splice_nxv8f64_offset_max(<vscale x 8 x double> %a
 ; CHECK-NEXT:    addi a0, a0, -15
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 15
-; CHECK-NEXT:    vsetvli a1, zero, e64, m8, tu, mu
+; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x double> @llvm.experimental.vector.splice.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b, i32 15)

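For readers skimming the test churn above, here is a minimal illustrative sketch (not taken verbatim from this patch; the function and value names are invented for the example) of how an unmasked vslidedown call looks once the trailing policy operand is added. The operand order mirrors the updated declarations in the vslidedown tests below, and the value 1 is assumed here to select the tail-agnostic policy that the CHECK lines above switch to ("tu" becoming "ta").

declare <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
  <vscale x 1 x i8>,  ; dest operand (tied passthru)
  <vscale x 1 x i8>,  ; source vector
  i32,                ; slide offset
  i32,                ; vl
  i32)                ; policy operand added by this change

define <vscale x 1 x i8> @slidedown_example(<vscale x 1 x i8> %dest, <vscale x 1 x i8> %src, i32 %off, i32 %vl) {
  ; pass 1 for the policy operand (assumed to encode tail agnostic),
  ; matching the "ta" vsetvli settings in the updated CHECK lines
  %r = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
      <vscale x 1 x i8> %dest,
      <vscale x 1 x i8> %src,
      i32 %off,
      i32 %vl,
      i32 1)
  ret <vscale x 1 x i8> %r
}
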
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
index fca8c117477a5..05585597e29d2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
@@ -5,12 +5,14 @@ declare <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 1 x i8> @intrinsic_vslidedown_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -18,7 +20,8 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -50,7 +53,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vslidedown_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -58,7 +61,8 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -84,12 +88,14 @@ declare <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 2 x i8> @intrinsic_vslidedown_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -97,7 +103,8 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -129,7 +136,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vslidedown_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -137,7 +144,8 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -163,12 +171,14 @@ declare <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 4 x i8> @intrinsic_vslidedown_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -176,7 +186,8 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -208,7 +219,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vslidedown_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -216,7 +227,8 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -242,12 +254,14 @@ declare <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 8 x i8> @intrinsic_vslidedown_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -255,7 +269,8 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -287,7 +302,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vslidedown_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -295,7 +310,8 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -321,12 +337,14 @@ declare <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 16 x i8> @intrinsic_vslidedown_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -334,7 +352,8 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -366,7 +385,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vslidedown_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -374,7 +393,8 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -400,12 +420,14 @@ declare <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 32 x i8> @intrinsic_vslidedown_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -413,7 +435,8 @@ entry:
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -445,7 +468,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vslidedown_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -453,7 +476,8 @@ entry:
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -479,12 +503,14 @@ declare <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 1 x i16> @intrinsic_vslidedown_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -492,7 +518,8 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -524,7 +551,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vslidedown_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -532,7 +559,8 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -558,12 +586,14 @@ declare <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 2 x i16> @intrinsic_vslidedown_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -571,7 +601,8 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -603,7 +634,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vslidedown_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -611,7 +642,8 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -637,12 +669,14 @@ declare <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 4 x i16> @intrinsic_vslidedown_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -650,7 +684,8 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -682,7 +717,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vslidedown_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -690,7 +725,8 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -716,12 +752,14 @@ declare <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 8 x i16> @intrinsic_vslidedown_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -729,7 +767,8 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -761,7 +800,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vslidedown_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +808,8 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -795,12 +835,14 @@ declare <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 16 x i16> @intrinsic_vslidedown_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -808,7 +850,8 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -840,7 +883,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vslidedown_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -848,7 +891,8 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -874,12 +918,14 @@ declare <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 1 x i32> @intrinsic_vslidedown_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -887,7 +933,8 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -919,7 +966,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vslidedown_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -927,7 +974,8 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -953,12 +1001,14 @@ declare <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 2 x i32> @intrinsic_vslidedown_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -966,7 +1016,8 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -998,7 +1049,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vslidedown_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1006,7 +1057,8 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1032,12 +1084,14 @@ declare <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 4 x i32> @intrinsic_vslidedown_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1045,7 +1099,8 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1077,7 +1132,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vslidedown_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1085,7 +1140,8 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1111,12 +1167,14 @@ declare <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 8 x i32> @intrinsic_vslidedown_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1124,7 +1182,8 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1156,7 +1215,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vslidedown_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1164,7 +1223,8 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1190,12 +1250,14 @@ declare <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 1 x i64> @intrinsic_vslidedown_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1203,7 +1265,8 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1235,7 +1298,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vslidedown_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1243,7 +1306,8 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1269,12 +1333,14 @@ declare <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 2 x i64> @intrinsic_vslidedown_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1282,7 +1348,8 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1314,7 +1381,7 @@ entry:
 define <vscale x 2 x i64> @intrinsic_vslidedown_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1322,7 +1389,8 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1348,12 +1416,14 @@ declare <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 4 x i64> @intrinsic_vslidedown_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1361,7 +1431,8 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1393,7 +1464,7 @@ entry:
 define <vscale x 4 x i64> @intrinsic_vslidedown_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1401,7 +1472,8 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1427,12 +1499,14 @@ declare <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 1 x half> @intrinsic_vslidedown_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1440,7 +1514,8 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -1472,7 +1547,7 @@ entry:
 define <vscale x 1 x half> @intrinsic_vslidedown_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1480,7 +1555,8 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -1506,12 +1582,14 @@ declare <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 2 x half> @intrinsic_vslidedown_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1519,7 +1597,8 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -1551,7 +1630,7 @@ entry:
 define <vscale x 2 x half> @intrinsic_vslidedown_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1559,7 +1638,8 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -1585,12 +1665,14 @@ declare <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 4 x half> @intrinsic_vslidedown_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1598,7 +1680,8 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -1630,7 +1713,7 @@ entry:
 define <vscale x 4 x half> @intrinsic_vslidedown_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1638,7 +1721,8 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -1664,12 +1748,14 @@ declare <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 8 x half> @intrinsic_vslidedown_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1677,7 +1763,8 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -1709,7 +1796,7 @@ entry:
 define <vscale x 8 x half> @intrinsic_vslidedown_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1717,7 +1804,8 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -1743,12 +1831,14 @@ declare <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 16 x half> @intrinsic_vslidedown_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1756,7 +1846,8 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -1788,7 +1879,7 @@ entry:
 define <vscale x 16 x half> @intrinsic_vslidedown_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1796,7 +1887,8 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -1822,12 +1914,14 @@ declare <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 1 x float> @intrinsic_vslidedown_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1835,7 +1929,8 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -1867,7 +1962,7 @@ entry:
 define <vscale x 1 x float> @intrinsic_vslidedown_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1875,7 +1970,8 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -1901,12 +1997,14 @@ declare <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 2 x float> @intrinsic_vslidedown_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1914,7 +2012,8 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -1946,7 +2045,7 @@ entry:
 define <vscale x 2 x float> @intrinsic_vslidedown_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1954,7 +2053,8 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -1980,12 +2080,14 @@ declare <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 4 x float> @intrinsic_vslidedown_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1993,7 +2095,8 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -2025,7 +2128,7 @@ entry:
 define <vscale x 4 x float> @intrinsic_vslidedown_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -2033,7 +2136,8 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -2059,12 +2163,14 @@ declare <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 8 x float> @intrinsic_vslidedown_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -2072,7 +2178,8 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -2104,7 +2211,7 @@ entry:
 define <vscale x 8 x float> @intrinsic_vslidedown_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -2112,7 +2219,8 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -2138,12 +2246,14 @@ declare <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 1 x double> @intrinsic_vslidedown_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -2151,7 +2261,8 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -2183,7 +2294,7 @@ entry:
 define <vscale x 1 x double> @intrinsic_vslidedown_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -2191,7 +2302,8 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -2217,12 +2329,14 @@ declare <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 2 x double> @intrinsic_vslidedown_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -2230,7 +2344,8 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -2262,7 +2377,7 @@ entry:
 define <vscale x 2 x double> @intrinsic_vslidedown_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -2270,7 +2385,8 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -2296,12 +2412,14 @@ declare <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 4 x double> @intrinsic_vslidedown_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -2309,7 +2427,8 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -2341,7 +2460,7 @@ entry:
 define <vscale x 4 x double> @intrinsic_vslidedown_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -2349,7 +2468,8 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 4 x double> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll
index df4d2ebb1e770..46f549ac87544 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll
@@ -5,12 +5,14 @@ declare <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 1 x i8> @intrinsic_vslidedown_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -18,7 +20,8 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -50,7 +53,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vslidedown_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -58,7 +61,8 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -84,12 +88,14 @@ declare <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 2 x i8> @intrinsic_vslidedown_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -97,7 +103,8 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -129,7 +136,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vslidedown_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -137,7 +144,8 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -163,12 +171,14 @@ declare <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 4 x i8> @intrinsic_vslidedown_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -176,7 +186,8 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -208,7 +219,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vslidedown_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -216,7 +227,8 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -242,12 +254,14 @@ declare <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 8 x i8> @intrinsic_vslidedown_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -255,7 +269,8 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -287,7 +302,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vslidedown_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -295,7 +310,8 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -321,12 +337,14 @@ declare <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 16 x i8> @intrinsic_vslidedown_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -334,7 +352,8 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -366,7 +385,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vslidedown_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -374,7 +393,8 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -400,12 +420,14 @@ declare <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 32 x i8> @intrinsic_vslidedown_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -413,7 +435,8 @@ entry:
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -445,7 +468,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vslidedown_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -453,7 +476,8 @@ entry:
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -479,12 +503,14 @@ declare <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 1 x i16> @intrinsic_vslidedown_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -492,7 +518,8 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -524,7 +551,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vslidedown_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -532,7 +559,8 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -558,12 +586,14 @@ declare <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 2 x i16> @intrinsic_vslidedown_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -571,7 +601,8 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -603,7 +634,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vslidedown_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -611,7 +642,8 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -637,12 +669,14 @@ declare <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 4 x i16> @intrinsic_vslidedown_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -650,7 +684,8 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -682,7 +717,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vslidedown_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -690,7 +725,8 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -716,12 +752,14 @@ declare <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 8 x i16> @intrinsic_vslidedown_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -729,7 +767,8 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -761,7 +800,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vslidedown_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +808,8 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -795,12 +835,14 @@ declare <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 16 x i16> @intrinsic_vslidedown_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -808,7 +850,8 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -840,7 +883,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vslidedown_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -848,7 +891,8 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -874,12 +918,14 @@ declare <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 1 x i32> @intrinsic_vslidedown_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -887,7 +933,8 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -919,7 +966,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vslidedown_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -927,7 +974,8 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -953,12 +1001,14 @@ declare <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 2 x i32> @intrinsic_vslidedown_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -966,7 +1016,8 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -998,7 +1049,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vslidedown_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1006,7 +1057,8 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1032,12 +1084,14 @@ declare <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 4 x i32> @intrinsic_vslidedown_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1045,7 +1099,8 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1077,7 +1132,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vslidedown_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1085,7 +1140,8 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1111,12 +1167,14 @@ declare <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 8 x i32> @intrinsic_vslidedown_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1124,7 +1182,8 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1156,7 +1215,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vslidedown_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1164,7 +1223,8 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1190,12 +1250,14 @@ declare <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 1 x i64> @intrinsic_vslidedown_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1203,7 +1265,8 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1235,7 +1298,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vslidedown_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1243,7 +1306,8 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1269,12 +1333,14 @@ declare <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 2 x i64> @intrinsic_vslidedown_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1282,7 +1348,8 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1314,7 +1381,7 @@ entry:
 define <vscale x 2 x i64> @intrinsic_vslidedown_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1322,7 +1389,8 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1348,12 +1416,14 @@ declare <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 4 x i64> @intrinsic_vslidedown_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1361,7 +1431,8 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1393,7 +1464,7 @@ entry:
 define <vscale x 4 x i64> @intrinsic_vslidedown_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1401,7 +1472,8 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1427,12 +1499,14 @@ declare <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 1 x half> @intrinsic_vslidedown_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1440,7 +1514,8 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -1472,7 +1547,7 @@ entry:
 define <vscale x 1 x half> @intrinsic_vslidedown_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1480,7 +1555,8 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -1506,12 +1582,14 @@ declare <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 2 x half> @intrinsic_vslidedown_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1519,7 +1597,8 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -1551,7 +1630,7 @@ entry:
 define <vscale x 2 x half> @intrinsic_vslidedown_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1559,7 +1638,8 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -1585,12 +1665,14 @@ declare <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 4 x half> @intrinsic_vslidedown_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1598,7 +1680,8 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -1630,7 +1713,7 @@ entry:
 define <vscale x 4 x half> @intrinsic_vslidedown_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1638,7 +1721,8 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -1664,12 +1748,14 @@ declare <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 8 x half> @intrinsic_vslidedown_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1677,7 +1763,8 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -1709,7 +1796,7 @@ entry:
 define <vscale x 8 x half> @intrinsic_vslidedown_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1717,7 +1804,8 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -1743,12 +1831,14 @@ declare <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 16 x half> @intrinsic_vslidedown_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1756,7 +1846,8 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -1788,7 +1879,7 @@ entry:
 define <vscale x 16 x half> @intrinsic_vslidedown_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1796,7 +1887,8 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -1822,12 +1914,14 @@ declare <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 1 x float> @intrinsic_vslidedown_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1835,7 +1929,8 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -1867,7 +1962,7 @@ entry:
 define <vscale x 1 x float> @intrinsic_vslidedown_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1875,7 +1970,8 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -1901,12 +1997,14 @@ declare <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 2 x float> @intrinsic_vslidedown_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1914,7 +2012,8 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -1946,7 +2045,7 @@ entry:
 define <vscale x 2 x float> @intrinsic_vslidedown_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1954,7 +2053,8 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -1980,12 +2080,14 @@ declare <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 4 x float> @intrinsic_vslidedown_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1993,7 +2095,8 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -2025,7 +2128,7 @@ entry:
 define <vscale x 4 x float> @intrinsic_vslidedown_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -2033,7 +2136,8 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -2059,12 +2163,14 @@ declare <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 8 x float> @intrinsic_vslidedown_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -2072,7 +2178,8 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -2104,7 +2211,7 @@ entry:
 define <vscale x 8 x float> @intrinsic_vslidedown_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -2112,7 +2219,8 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -2138,12 +2246,14 @@ declare <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 1 x double> @intrinsic_vslidedown_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -2151,7 +2261,8 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -2183,7 +2294,7 @@ entry:
 define <vscale x 1 x double> @intrinsic_vslidedown_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -2191,7 +2302,8 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -2217,12 +2329,14 @@ declare <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 2 x double> @intrinsic_vslidedown_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -2230,7 +2344,8 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -2262,7 +2377,7 @@ entry:
 define <vscale x 2 x double> @intrinsic_vslidedown_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -2270,7 +2385,8 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -2296,12 +2412,14 @@ declare <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 4 x double> @intrinsic_vslidedown_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -2309,7 +2427,8 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -2341,7 +2460,7 @@ entry:
 define <vscale x 4 x double> @intrinsic_vslidedown_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -2349,7 +2468,8 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 4 x double> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll
index 39a515b24a421..b701e954e8fe3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll
@@ -5,12 +5,14 @@ declare <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 1 x i8> @intrinsic_vslideup_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -18,7 +20,8 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -50,7 +53,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vslideup_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -58,7 +61,8 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -84,12 +88,14 @@ declare <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 2 x i8> @intrinsic_vslideup_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -97,7 +103,8 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -129,7 +136,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vslideup_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -137,7 +144,8 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -163,12 +171,14 @@ declare <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 4 x i8> @intrinsic_vslideup_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -176,7 +186,8 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -208,7 +219,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vslideup_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -216,7 +227,8 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -242,12 +254,14 @@ declare <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 8 x i8> @intrinsic_vslideup_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -255,7 +269,8 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -287,7 +302,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vslideup_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -295,7 +310,8 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -321,12 +337,14 @@ declare <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 16 x i8> @intrinsic_vslideup_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -334,7 +352,8 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -366,7 +385,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vslideup_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -374,7 +393,8 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -400,12 +420,14 @@ declare <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 32 x i8> @intrinsic_vslideup_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -413,7 +435,8 @@ entry:
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -445,7 +468,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vslideup_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -453,7 +476,8 @@ entry:
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -479,12 +503,14 @@ declare <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 1 x i16> @intrinsic_vslideup_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -492,7 +518,8 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -524,7 +551,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vslideup_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -532,7 +559,8 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -558,12 +586,14 @@ declare <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 2 x i16> @intrinsic_vslideup_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -571,7 +601,8 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -603,7 +634,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vslideup_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -611,7 +642,8 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -637,12 +669,14 @@ declare <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 4 x i16> @intrinsic_vslideup_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -650,7 +684,8 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -682,7 +717,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vslideup_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -690,7 +725,8 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -716,12 +752,14 @@ declare <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 8 x i16> @intrinsic_vslideup_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -729,7 +767,8 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -761,7 +800,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vslideup_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +808,8 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -795,12 +835,14 @@ declare <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 16 x i16> @intrinsic_vslideup_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -808,7 +850,8 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -840,7 +883,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vslideup_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -848,7 +891,8 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -874,12 +918,14 @@ declare <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 1 x i32> @intrinsic_vslideup_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -887,7 +933,8 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -919,7 +966,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vslideup_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -927,7 +974,8 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -953,12 +1001,14 @@ declare <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 2 x i32> @intrinsic_vslideup_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -966,7 +1016,8 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -998,7 +1049,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vslideup_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1006,7 +1057,8 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1032,12 +1084,14 @@ declare <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 4 x i32> @intrinsic_vslideup_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1045,7 +1099,8 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1077,7 +1132,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vslideup_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1085,7 +1140,8 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1111,12 +1167,14 @@ declare <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 8 x i32> @intrinsic_vslideup_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1124,7 +1182,8 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1156,7 +1215,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vslideup_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1164,7 +1223,8 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1190,12 +1250,14 @@ declare <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 1 x i64> @intrinsic_vslideup_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1203,7 +1265,8 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1235,7 +1298,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vslideup_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1243,7 +1306,8 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1269,12 +1333,14 @@ declare <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 2 x i64> @intrinsic_vslideup_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1282,7 +1348,8 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1314,7 +1381,7 @@ entry:
 define <vscale x 2 x i64> @intrinsic_vslideup_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1322,7 +1389,8 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1348,12 +1416,14 @@ declare <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 4 x i64> @intrinsic_vslideup_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1361,7 +1431,8 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1393,7 +1464,7 @@ entry:
 define <vscale x 4 x i64> @intrinsic_vslideup_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1401,7 +1472,8 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1427,12 +1499,14 @@ declare <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 1 x half> @intrinsic_vslideup_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1440,7 +1514,8 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -1472,7 +1547,7 @@ entry:
 define <vscale x 1 x half> @intrinsic_vslideup_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1480,7 +1555,8 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -1506,12 +1582,14 @@ declare <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 2 x half> @intrinsic_vslideup_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1519,7 +1597,8 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -1551,7 +1630,7 @@ entry:
 define <vscale x 2 x half> @intrinsic_vslideup_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1559,7 +1638,8 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -1585,12 +1665,14 @@ declare <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 4 x half> @intrinsic_vslideup_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1598,7 +1680,8 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -1630,7 +1713,7 @@ entry:
 define <vscale x 4 x half> @intrinsic_vslideup_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1638,7 +1721,8 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -1664,12 +1748,14 @@ declare <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 8 x half> @intrinsic_vslideup_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1677,7 +1763,8 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -1709,7 +1796,7 @@ entry:
 define <vscale x 8 x half> @intrinsic_vslideup_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1717,7 +1804,8 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -1743,12 +1831,14 @@ declare <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 16 x half> @intrinsic_vslideup_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1756,7 +1846,8 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -1788,7 +1879,7 @@ entry:
 define <vscale x 16 x half> @intrinsic_vslideup_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1796,7 +1887,8 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -1822,12 +1914,14 @@ declare <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 1 x float> @intrinsic_vslideup_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1835,7 +1929,8 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -1867,7 +1962,7 @@ entry:
 define <vscale x 1 x float> @intrinsic_vslideup_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1875,7 +1970,8 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -1901,12 +1997,14 @@ declare <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 2 x float> @intrinsic_vslideup_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1914,7 +2012,8 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -1946,7 +2045,7 @@ entry:
 define <vscale x 2 x float> @intrinsic_vslideup_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1954,7 +2053,8 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -1980,12 +2080,14 @@ declare <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 4 x float> @intrinsic_vslideup_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1993,7 +2095,8 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -2025,7 +2128,7 @@ entry:
 define <vscale x 4 x float> @intrinsic_vslideup_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -2033,7 +2136,8 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -2059,12 +2163,14 @@ declare <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 8 x float> @intrinsic_vslideup_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -2072,7 +2178,8 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -2104,7 +2211,7 @@ entry:
 define <vscale x 8 x float> @intrinsic_vslideup_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -2112,7 +2219,8 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -2138,12 +2246,14 @@ declare <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 1 x double> @intrinsic_vslideup_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -2151,7 +2261,8 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -2183,7 +2294,7 @@ entry:
 define <vscale x 1 x double> @intrinsic_vslideup_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -2191,7 +2302,8 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -2217,12 +2329,14 @@ declare <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 2 x double> @intrinsic_vslideup_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -2230,7 +2344,8 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -2262,7 +2377,7 @@ entry:
 define <vscale x 2 x double> @intrinsic_vslideup_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -2270,7 +2385,8 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -2296,12 +2412,14 @@ declare <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   i32,
-  i32);
+  i32,
+  i32
+);
 
 define <vscale x 4 x double> @intrinsic_vslideup_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -2309,7 +2427,8 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
     i32 %2,
-    i32 %3)
+    i32 %3,
+    i32 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -2341,7 +2460,7 @@ entry:
 define <vscale x 4 x double> @intrinsic_vslideup_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -2349,7 +2468,8 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
     i32 9,
-    i32 %2)
+    i32 %2,
+    i32 1)
 
   ret <vscale x 4 x double> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll
index ed2f21ff5872f..591d3db1c8ba1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll
@@ -5,12 +5,14 @@ declare <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 1 x i8> @intrinsic_vslideup_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -18,7 +20,8 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -50,7 +53,7 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vslideup_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -58,7 +61,8 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -84,12 +88,14 @@ declare <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 2 x i8> @intrinsic_vslideup_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -97,7 +103,8 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -129,7 +136,7 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vslideup_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -137,7 +144,8 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -163,12 +171,14 @@ declare <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 4 x i8> @intrinsic_vslideup_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -176,7 +186,8 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -208,7 +219,7 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vslideup_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -216,7 +227,8 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -242,12 +254,14 @@ declare <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 8 x i8> @intrinsic_vslideup_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -255,7 +269,8 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -287,7 +302,7 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vslideup_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -295,7 +310,8 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -321,12 +337,14 @@ declare <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 16 x i8> @intrinsic_vslideup_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -334,7 +352,8 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -366,7 +385,7 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vslideup_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -374,7 +393,8 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -400,12 +420,14 @@ declare <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 32 x i8> @intrinsic_vslideup_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -413,7 +435,8 @@ entry:
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -445,7 +468,7 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vslideup_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -453,7 +476,8 @@ entry:
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -479,12 +503,14 @@ declare <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 1 x i16> @intrinsic_vslideup_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -492,7 +518,8 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -524,7 +551,7 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vslideup_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -532,7 +559,8 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -558,12 +586,14 @@ declare <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 2 x i16> @intrinsic_vslideup_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -571,7 +601,8 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -603,7 +634,7 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vslideup_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -611,7 +642,8 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -637,12 +669,14 @@ declare <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 4 x i16> @intrinsic_vslideup_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -650,7 +684,8 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -682,7 +717,7 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vslideup_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -690,7 +725,8 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -716,12 +752,14 @@ declare <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 8 x i16> @intrinsic_vslideup_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -729,7 +767,8 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -761,7 +800,7 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vslideup_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +808,8 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -795,12 +835,14 @@ declare <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 16 x i16> @intrinsic_vslideup_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -808,7 +850,8 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -840,7 +883,7 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vslideup_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -848,7 +891,8 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -874,12 +918,14 @@ declare <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 1 x i32> @intrinsic_vslideup_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -887,7 +933,8 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -919,7 +966,7 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vslideup_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -927,7 +974,8 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -953,12 +1001,14 @@ declare <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 2 x i32> @intrinsic_vslideup_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -966,7 +1016,8 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -998,7 +1049,7 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vslideup_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1006,7 +1057,8 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1032,12 +1084,14 @@ declare <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 4 x i32> @intrinsic_vslideup_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1045,7 +1099,8 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1077,7 +1132,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vslideup_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1085,7 +1140,8 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1111,12 +1167,14 @@ declare <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 8 x i32> @intrinsic_vslideup_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1124,7 +1182,8 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1156,7 +1215,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vslideup_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1164,7 +1223,8 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1190,12 +1250,14 @@ declare <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 1 x i64> @intrinsic_vslideup_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1203,7 +1265,8 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1235,7 +1298,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vslideup_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1243,7 +1306,8 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1269,12 +1333,14 @@ declare <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 2 x i64> @intrinsic_vslideup_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1282,7 +1348,8 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1314,7 +1381,7 @@ entry:
 define <vscale x 2 x i64> @intrinsic_vslideup_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1322,7 +1389,8 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1348,12 +1416,14 @@ declare <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 4 x i64> @intrinsic_vslideup_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1361,7 +1431,8 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1393,7 +1464,7 @@ entry:
 define <vscale x 4 x i64> @intrinsic_vslideup_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1401,7 +1472,8 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1427,12 +1499,14 @@ declare <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 1 x half> @intrinsic_vslideup_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1440,7 +1514,8 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -1472,7 +1547,7 @@ entry:
 define <vscale x 1 x half> @intrinsic_vslideup_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1480,7 +1555,8 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -1506,12 +1582,14 @@ declare <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 2 x half> @intrinsic_vslideup_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1519,7 +1597,8 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -1551,7 +1630,7 @@ entry:
 define <vscale x 2 x half> @intrinsic_vslideup_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1559,7 +1638,8 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -1585,12 +1665,14 @@ declare <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 4 x half> @intrinsic_vslideup_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1598,7 +1680,8 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -1630,7 +1713,7 @@ entry:
 define <vscale x 4 x half> @intrinsic_vslideup_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1638,7 +1721,8 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -1664,12 +1748,14 @@ declare <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 8 x half> @intrinsic_vslideup_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1677,7 +1763,8 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -1709,7 +1796,7 @@ entry:
 define <vscale x 8 x half> @intrinsic_vslideup_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1717,7 +1804,8 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -1743,12 +1831,14 @@ declare <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 16 x half> @intrinsic_vslideup_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1756,7 +1846,8 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -1788,7 +1879,7 @@ entry:
 define <vscale x 16 x half> @intrinsic_vslideup_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1796,7 +1887,8 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -1822,12 +1914,14 @@ declare <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 1 x float> @intrinsic_vslideup_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1835,7 +1929,8 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -1867,7 +1962,7 @@ entry:
 define <vscale x 1 x float> @intrinsic_vslideup_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1875,7 +1970,8 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -1901,12 +1997,14 @@ declare <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 2 x float> @intrinsic_vslideup_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1914,7 +2012,8 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -1946,7 +2045,7 @@ entry:
 define <vscale x 2 x float> @intrinsic_vslideup_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1954,7 +2053,8 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -1980,12 +2080,14 @@ declare <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 4 x float> @intrinsic_vslideup_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1993,7 +2095,8 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -2025,7 +2128,7 @@ entry:
 define <vscale x 4 x float> @intrinsic_vslideup_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -2033,7 +2136,8 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -2059,12 +2163,14 @@ declare <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 8 x float> @intrinsic_vslideup_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -2072,7 +2178,8 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -2104,7 +2211,7 @@ entry:
 define <vscale x 8 x float> @intrinsic_vslideup_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -2112,7 +2219,8 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -2138,12 +2246,14 @@ declare <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 1 x double> @intrinsic_vslideup_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -2151,7 +2261,8 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -2183,7 +2294,7 @@ entry:
 define <vscale x 1 x double> @intrinsic_vslideup_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -2191,7 +2302,8 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -2217,12 +2329,14 @@ declare <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 2 x double> @intrinsic_vslideup_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -2230,7 +2344,8 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -2262,7 +2377,7 @@ entry:
 define <vscale x 2 x double> @intrinsic_vslideup_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -2270,7 +2385,8 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -2296,12 +2412,14 @@ declare <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   i64,
-  i64);
+  i64,
+  i64
+);
 
 define <vscale x 4 x double> @intrinsic_vslideup_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -2309,7 +2427,8 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
     i64 %2,
-    i64 %3)
+    i64 %3,
+    i64 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -2341,7 +2460,7 @@ entry:
 define <vscale x 4 x double> @intrinsic_vslideup_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -2349,7 +2468,8 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
     i64 9,
-    i64 %2)
+    i64 %2,
+    i64 1)
 
   ret <vscale x 4 x double> %a
 }
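
For readers skimming the repeated hunks above: each unmasked vslideup declaration now takes five operands instead of four, and each call site passes a trailing 1 while the matching CHECK line switches from tu to ta. Below is a minimal standalone sketch of the updated call shape; it is not taken from the test files, the function name, element type, and offset value are illustrative only, and the operand comments reflect what the hunks themselves show.

declare <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
  <vscale x 2 x i32>,   ; first vector operand (returned in v8 in the CHECK lines)
  <vscale x 2 x i32>,   ; vector operand that is slid up
  i64,                  ; slide offset
  i64,                  ; vl
  i64                   ; new policy operand (the tests above pass 1; CHECK lines use ta)
);

define <vscale x 2 x i32> @sketch_vslideup_ta(<vscale x 2 x i32> %dest, <vscale x 2 x i32> %src, i64 %vl) nounwind {
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
    <vscale x 2 x i32> %dest,
    <vscale x 2 x i32> %src,
    i64 4,
    i64 %vl,
    i64 1)
  ret <vscale x 2 x i32> %a
}

The same five-operand shape applies to every element type and LMUL covered by the hunks above; only the vector type and the vsetvli configuration in the expected assembly change.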


        

