[llvm] 7638409 - [RISCV] Make vsetvli intrinsics default to MA.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Mon Feb 13 10:40:20 PST 2023


Author: Craig Topper
Date: 2023-02-13T10:39:55-08:00
New Revision: 7638409d436394a2007111cd3eea14ca982b0061

URL: https://github.com/llvm/llvm-project/commit/7638409d436394a2007111cd3eea14ca982b0061
DIFF: https://github.com/llvm/llvm-project/commit/7638409d436394a2007111cd3eea14ca982b0061.diff

LOG: [RISCV] Make vsetvli intrinsics default to MA.

The vsetvli insertion pass can replace MA with MU when a using
instruction needs mask-undisturbed semantics, but it will not
convert MU back to MA, so we need to start at MA.
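
For illustration only (a sketch modeled on the tests updated below;
the function name is made up, and the SEW/LMUL operand encoding
follows the existing vsetvli tests), a bare call to the intrinsic
should now select to a mask-agnostic vsetvli. When a later masked
instruction needs mask-undisturbed behavior, the insertion pass
still provides MU for it (see test3 in vsetvli-insert.ll below).

  declare i64 @llvm.riscv.vsetvli(i64, i64, i64)

  define i64 @example(i64 %avl) {
    ; now selects to roughly: vsetvli a0, a0, e32, m1, ta, ma   (previously ta, mu)
    %vl = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 0)
    ret i64 %vl
  }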

Reviewed By: eopXD

Differential Revision: https://reviews.llvm.org/D143790

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
    llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll
    llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
    llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll
    llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
    llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
    llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll
    llvm/test/CodeGen/RISCV/rvv/vslide1down-constant-vl-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vslide1up-constant-vl-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll
    llvm/test/Transforms/LoopStrengthReduce/RISCV/lsr-drop-solution.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index e156f4dc82e56..e9ef19a33296d 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -553,7 +553,7 @@ void RISCVDAGToDAGISel::selectVSETVLI(SDNode *Node) {
       Node->getConstantOperandVal(Offset + 1) & 0x7);
 
   unsigned VTypeI = RISCVVType::encodeVTYPE(VLMul, SEW, /*TailAgnostic*/ true,
-                                            /*MaskAgnostic*/ false);
+                                            /*MaskAgnostic*/ true);
   SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);
 
   SmallVector<EVT, 2> VTs = {XLenVT};

diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll
index 7350e3cfcf154..038260b36954b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll
@@ -13,7 +13,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vslide1down_mask_tumu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_tumu_vx_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a3, a2, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli a3, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    slli a3, a3, 1
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m1, ta, ma
 ; CHECK-NEXT:    vslide1down.vx v9, v9, a0
@@ -35,7 +35,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vslide1down_mask_tamu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_tamu_vx_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a3, a2, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli a3, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    slli a3, a3, 1
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m1, ta, ma
 ; CHECK-NEXT:    vslide1down.vx v9, v9, a0
@@ -59,7 +59,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vslide1down_mask_tuma_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_tuma_vx_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a3, a2, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli a3, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    slli a3, a3, 1
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m1, ta, ma
 ; CHECK-NEXT:    vslide1down.vx v9, v9, a0
@@ -82,7 +82,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vslide1down_mask_tama_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_tama_vx_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a2, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli a2, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    slli a2, a2, 1
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
@@ -102,7 +102,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vslide1down_mask_tama_undef_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_tama_undef_mask_vx_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a2, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli a2, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    slli a2, a2, 1
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0

diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll
index 521599b4ccebf..8855ae22d189d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll
@@ -74,7 +74,7 @@ define dso_local signext i32 @main() #0 {
 ; CHECK-NEXT:    sw zero, -36(s0)
 ; CHECK-NEXT:    sd zero, -48(s0)
 ; CHECK-NEXT:    sd zero, -56(s0)
-; CHECK-NEXT:    vsetivli a0, 4, e32, m8, ta, mu
+; CHECK-NEXT:    vsetivli a0, 4, e32, m8, ta, ma
 ; CHECK-NEXT:    sd a0, -64(s0)
 ; CHECK-NEXT:    ld a0, -64(s0)
 ; CHECK-NEXT:    addi a1, s0, -56

diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
index 716963d5e1f30..dfbeb5dd96498 100644
--- a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
@@ -705,7 +705,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
 ; RV32-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64:
 ; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli a2, a2, e64, m1, ta, mu
+; RV32-NEXT:    vsetvli a2, a2, e64, m1, ta, ma
 ; RV32-NEXT:    slli a2, a2, 1
 ; RV32-NEXT:    vsetvli zero, a2, e32, m1, tu, ma
 ; RV32-NEXT:    vmv1r.v v10, v8
@@ -737,7 +737,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
 ; RV32-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64:
 ; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    vsetvli a2, a2, e64, m1, ta, mu
+; RV32-NEXT:    vsetvli a2, a2, e64, m1, ta, ma
 ; RV32-NEXT:    slli a2, a2, 1
 ; RV32-NEXT:    vsetvli zero, a2, e32, m1, tu, ma
 ; RV32-NEXT:    vmv1r.v v10, v8

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll
index 9ea6a7c3ae3a4..5804f8edf84d2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll
@@ -7,7 +7,7 @@ declare i64 @llvm.riscv.vsetvli(
 define signext i32 @vsetvl_sext() {
 ; CHECK-LABEL: vsetvl_sext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli a0, 1, e16, m2, ta, mu
+; CHECK-NEXT:    vsetivli a0, 1, e16, m2, ta, ma
 ; CHECK-NEXT:    ret
   %a = call i64 @llvm.riscv.vsetvli(i64 1, i64 1, i64 1)
   %b = trunc i64 %a to i32
@@ -17,7 +17,7 @@ define signext i32 @vsetvl_sext() {
 define zeroext i32 @vsetvl_zext() {
 ; CHECK-LABEL: vsetvl_zext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli a0, 1, e16, m2, ta, mu
+; CHECK-NEXT:    vsetivli a0, 1, e16, m2, ta, ma
 ; CHECK-NEXT:    ret
   %a = call i64 @llvm.riscv.vsetvli(i64 1, i64 1, i64 1)
   %b = trunc i64 %a to i32
@@ -27,7 +27,7 @@ define zeroext i32 @vsetvl_zext() {
 define i64 @vsetvl_and17bits() {
 ; CHECK-LABEL: vsetvl_and17bits:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli a0, 1, e16, m2, ta, mu
+; CHECK-NEXT:    vsetivli a0, 1, e16, m2, ta, ma
 ; CHECK-NEXT:    ret
   %a = call i64 @llvm.riscv.vsetvli(i64 1, i64 1, i64 1)
   %b = and i64 %a, 131071

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
index 2bed6c54c6b92..f1b84da0351a9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -23,7 +23,7 @@ declare void @llvm.riscv.vse.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>*
 define <vscale x 1 x double> @test1(i64 %avl, i8 zeroext %cond, <vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
 ; CHECK-LABEL: test1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    beqz a1, .LBB0_2
 ; CHECK-NEXT:  # %bb.1: # %if.then
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
@@ -54,7 +54,7 @@ if.end:                                           ; preds = %if.else, %if.then
 define <vscale x 1 x double> @test2(i64 %avl, i8 zeroext %cond, <vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
 ; CHECK-LABEL: test2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    beqz a1, .LBB1_2
 ; CHECK-NEXT:  # %bb.1: # %if.then
 ; CHECK-NEXT:    vfadd.vv v9, v8, v9
@@ -88,14 +88,13 @@ define <vscale x 1 x double> @test3(i64 %avl, i8 zeroext %cond, <vscale x 1 x do
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    beqz a1, .LBB2_2
 ; CHECK-NEXT:  # %bb.1: # %if.then
-; CHECK-NEXT:    vsetvli a0, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli a0, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfadd.vv v9, v8, v9
-; CHECK-NEXT:    j .LBB2_3
+; CHECK-NEXT:    vfmul.vv v8, v9, v8
+; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB2_2: # %if.else
-; CHECK-NEXT:    vsetvli a0, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli a0, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfsub.vv v9, v8, v9
-; CHECK-NEXT:  .LBB2_3: # %if.end
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v9, v8
 ; CHECK-NEXT:    ret
 entry:
@@ -179,7 +178,7 @@ define <vscale x 1 x double> @test5(i64 %avl, i8 zeroext %cond, <vscale x 1 x do
 ; CHECK-LABEL: test5:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    andi a2, a1, 1
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    bnez a2, .LBB4_3
 ; CHECK-NEXT:  # %bb.1: # %if.else
 ; CHECK-NEXT:    vfsub.vv v9, v8, v9
@@ -236,14 +235,14 @@ define <vscale x 1 x double> @test6(i64 %avl, i8 zeroext %cond, <vscale x 1 x do
 ; CHECK-LABEL: test6:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    andi a3, a1, 1
-; CHECK-NEXT:    vsetvli a2, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli a2, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    bnez a3, .LBB5_3
 ; CHECK-NEXT:  # %bb.1: # %if.else
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
 ; CHECK-NEXT:    andi a1, a1, 2
 ; CHECK-NEXT:    beqz a1, .LBB5_4
 ; CHECK-NEXT:  .LBB5_2: # %if.then4
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    lui a0, %hi(.LCPI5_0)
 ; CHECK-NEXT:    addi a0, a0, %lo(.LCPI5_0)
 ; CHECK-NEXT:    vlse64.v v9, (a0), zero
@@ -260,7 +259,7 @@ define <vscale x 1 x double> @test6(i64 %avl, i8 zeroext %cond, <vscale x 1 x do
 ; CHECK-NEXT:    andi a1, a1, 2
 ; CHECK-NEXT:    bnez a1, .LBB5_2
 ; CHECK-NEXT:  .LBB5_4: # %if.else5
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    lui a0, 260096
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    lui a0, 262144
@@ -330,7 +329,7 @@ define <vscale x 1 x double> @test8(i64 %avl, i8 zeroext %cond, <vscale x 1 x do
 ; CHECK-NEXT:    csrr a2, vlenb
 ; CHECK-NEXT:    slli a2, a2, 1
 ; CHECK-NEXT:    sub sp, sp, a2
-; CHECK-NEXT:    vsetvli s0, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli s0, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    beqz a1, .LBB6_2
 ; CHECK-NEXT:  # %bb.1: # %if.then
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
@@ -389,7 +388,7 @@ define <vscale x 1 x double> @test9(i64 %avl, i8 zeroext %cond, <vscale x 1 x do
 ; CHECK-NEXT:    csrr a2, vlenb
 ; CHECK-NEXT:    slli a2, a2, 1
 ; CHECK-NEXT:    sub sp, sp, a2
-; CHECK-NEXT:    vsetvli s0, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli s0, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    beqz a1, .LBB7_2
 ; CHECK-NEXT:  # %bb.1: # %if.then
 ; CHECK-NEXT:    vfadd.vv v9, v8, v9
@@ -442,11 +441,10 @@ if.end:                                           ; preds = %if.else, %if.then
 define void @saxpy_vec(i64 %n, float %a, float* nocapture readonly %x, float* nocapture %y) {
 ; CHECK-LABEL: saxpy_vec:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a3, a0, e32, m8, ta, mu
+; CHECK-NEXT:    vsetvli a3, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    beqz a3, .LBB8_2
 ; CHECK-NEXT:  .LBB8_1: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a1)
 ; CHECK-NEXT:    vle32.v v16, (a2)
 ; CHECK-NEXT:    slli a4, a3, 2
@@ -455,7 +453,7 @@ define void @saxpy_vec(i64 %n, float %a, float* nocapture readonly %x, float* no
 ; CHECK-NEXT:    vfmacc.vf v16, fa0, v8
 ; CHECK-NEXT:    vse32.v v16, (a2)
 ; CHECK-NEXT:    sub a0, a0, a3
-; CHECK-NEXT:    vsetvli a3, a0, e32, m8, ta, mu
+; CHECK-NEXT:    vsetvli a3, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    add a2, a2, a4
 ; CHECK-NEXT:    bnez a3, .LBB8_1
 ; CHECK-NEXT:  .LBB8_2: # %for.end
@@ -585,14 +583,13 @@ declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(<vscale x 2 x i32
 define void @vlmax(i64 %N, double* %c, double* %a, double* %b) {
 ; CHECK-LABEL: vlmax:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a6, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli a6, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    blez a0, .LBB11_3
 ; CHECK-NEXT:  # %bb.1: # %for.body.preheader
 ; CHECK-NEXT:    li a5, 0
 ; CHECK-NEXT:    slli a4, a6, 3
 ; CHECK-NEXT:  .LBB11_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a2)
 ; CHECK-NEXT:    vle64.v v9, (a3)
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
@@ -633,7 +630,7 @@ for.end:                                          ; preds = %for.body, %entry
 define void @vector_init_vlmax(i64 %N, double* %c) {
 ; CHECK-LABEL: vector_init_vlmax:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    blez a0, .LBB12_3
 ; CHECK-NEXT:  # %bb.1: # %for.body.preheader
 ; CHECK-NEXT:    li a3, 0
@@ -641,7 +638,6 @@ define void @vector_init_vlmax(i64 %N, double* %c) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:  .LBB12_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vse64.v v8, (a1)
 ; CHECK-NEXT:    add a3, a3, a2
 ; CHECK-NEXT:    add a1, a1, a4
@@ -670,7 +666,7 @@ for.end:                                          ; preds = %for.body, %entry
 define void @vector_init_vsetvli_N(i64 %N, double* %c) {
 ; CHECK-LABEL: vector_init_vsetvli_N:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli a2, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    blez a0, .LBB13_3
 ; CHECK-NEXT:  # %bb.1: # %for.body.preheader
 ; CHECK-NEXT:    li a3, 0
@@ -709,7 +705,7 @@ define void @vector_init_vsetvli_fv(i64 %N, double* %c) {
 ; CHECK-LABEL: vector_init_vsetvli_fv:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li a2, 0
-; CHECK-NEXT:    vsetivli a3, 4, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli a3, 4, e64, m1, ta, ma
 ; CHECK-NEXT:    slli a4, a3, 3
 ; CHECK-NEXT:    vsetvli a5, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
@@ -835,7 +831,7 @@ define <vscale x 2 x i32> @pre_lmul(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y
 ; CHECK-LABEL: pre_lmul:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    andi a0, a0, 1
-; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vadd.vv v8, v8, v9

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
index 0aa09d1e34441..1a462576a7b3d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -18,7 +18,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
 define <vscale x 1 x double> @test1(i64 %avl, <vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
 ; CHECK-LABEL: test1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vsetvli a0, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
@@ -51,6 +51,7 @@ entry:
 define <vscale x 1 x i64> @test3(i64 %avl, <vscale x 1 x i64> %a, <vscale x 1 x i64>* %b, <vscale x 1 x i1> %c) nounwind {
 ; CHECK-LABEL: test3:
 ; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a1), v0.t
 ; CHECK-NEXT:    ret
@@ -86,7 +87,7 @@ entry:
 define <vscale x 1 x i1> @test5(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %avl) nounwind {
 ; CHECK-LABEL: test5:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmseq.vv v8, v8, v9
 ; CHECK-NEXT:    vmand.mm v0, v8, v0
 ; CHECK-NEXT:    ret
@@ -103,23 +104,22 @@ declare <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1.i64(<vscale x 1 x i1>, <vscal
 define void @test6(i32* nocapture readonly %A, i32* nocapture %B, i64 %n) {
 ; CHECK-LABEL: test6:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a3, a2, e32, m1, ta, mu
+; CHECK-NEXT:    vsetvli a3, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    beqz a3, .LBB5_3
 ; CHECK-NEXT:  # %bb.1: # %for.body.preheader
 ; CHECK-NEXT:    li a4, 0
 ; CHECK-NEXT:  .LBB5_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    slli a5, a4, 2
-; CHECK-NEXT:    add a6, a0, a5
-; CHECK-NEXT:    vsetvli zero, a3, e32, m1, ta, ma
-; CHECK-NEXT:    vle32.v v8, (a6)
+; CHECK-NEXT:    slli a6, a4, 2
+; CHECK-NEXT:    add a5, a0, a6
+; CHECK-NEXT:    vle32.v v8, (a5)
 ; CHECK-NEXT:    vmsle.vi v9, v8, -3
 ; CHECK-NEXT:    vmsgt.vi v10, v8, 2
 ; CHECK-NEXT:    vmor.mm v0, v9, v10
-; CHECK-NEXT:    add a5, a5, a1
-; CHECK-NEXT:    vse32.v v8, (a5), v0.t
+; CHECK-NEXT:    add a6, a6, a1
+; CHECK-NEXT:    vse32.v v8, (a6), v0.t
 ; CHECK-NEXT:    add a4, a4, a3
-; CHECK-NEXT:    vsetvli a3, a2, e32, m1, ta, mu
+; CHECK-NEXT:    vsetvli a3, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    bnez a3, .LBB5_2
 ; CHECK-NEXT:  .LBB5_3: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
@@ -259,7 +259,7 @@ entry:
 define <vscale x 1 x double> @test14(i64 %avl, <vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
 ; CHECK-LABEL: test14:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vsetvli a0, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -308,7 +308,7 @@ entry:
 define <vscale x 1 x double> @test16(i64 %avl, double %a, <vscale x 1 x double> %b) nounwind {
 ; CHECK-LABEL: test16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64, mf2, ta, mu
+; CHECK-NEXT:    vsetvli a0, a0, e64, mf2, ta, ma
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmv.v.f v9, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -399,7 +399,7 @@ entry:
 define i64 @avl_forward1(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p) nounwind {
 ; CHECK-LABEL: avl_forward1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetivli a1, 6, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli a1, 6, e32, m1, ta, ma
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    mv a0, a1
 ; CHECK-NEXT:    ret
@@ -413,7 +413,7 @@ entry:
 define i64 @avl_forward1b_neg(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p) nounwind {
 ; CHECK-LABEL: avl_forward1b_neg:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetivli a1, 6, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli a1, 6, e16, m1, ta, ma
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    mv a0, a1
@@ -427,7 +427,7 @@ entry:
 define i64 @avl_forward2(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p) nounwind {
 ; CHECK-LABEL: avl_forward2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    mv a0, a1
 ; CHECK-NEXT:    ret
@@ -442,7 +442,7 @@ entry:
 define void @avl_forward3(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %reg) nounwind {
 ; CHECK-LABEL: avl_forward3:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
 entry:
@@ -455,7 +455,7 @@ entry:
 define i64 @avl_forward3b(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %reg) nounwind {
 ; CHECK-LABEL: avl_forward3b:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32, m1, ta, mu
+; CHECK-NEXT:    vsetvli a1, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    mv a0, a1
 ; CHECK-NEXT:    ret
@@ -469,7 +469,7 @@ entry:
 define void @avl_forward4(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %reg) nounwind {
 ; CHECK-LABEL: avl_forward4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16, m1, ta, mu
+; CHECK-NEXT:    vsetvli a1, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
@@ -483,7 +483,7 @@ entry:
 define i64 @avl_forward4b(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %reg) nounwind {
 ; CHECK-LABEL: avl_forward4b:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16, m1, ta, mu
+; CHECK-NEXT:    vsetvli a1, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    mv a0, a1
@@ -499,7 +499,7 @@ entry:
 define <vscale x 1 x i64> @vleNff(i64* %str, i64 %n, i64 %x) {
 ; CHECK-LABEL: vleNff:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8, m4, ta, mu
+; CHECK-NEXT:    vsetvli a1, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vle64ff.v v8, (a0)
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, ma
@@ -520,7 +520,7 @@ entry:
 define <vscale x 1 x i64> @vleNff2(i64* %str, i64 %n, i64 %x) {
 ; CHECK-LABEL: vleNff2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8, m4, ta, mu
+; CHECK-NEXT:    vsetvli a1, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vle64ff.v v8, (a0)
 ; CHECK-NEXT:    vadd.vx v8, v8, a2
@@ -546,7 +546,7 @@ define <vscale x 2 x i32> @avl_forward5(<vscale x 2 x i32>* %addr) {
 ; CHECK-LABEL: avl_forward5:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetvli a1, a1, e8, m4, ta, mu
+; CHECK-NEXT:    vsetvli a1, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll
index 7335d1c666448..8d059c51cccab 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll
@@ -10,7 +10,7 @@ declare iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen, iXLen)
 define iXLen @test_vsetvli_e8m1(iXLen %avl) nounwind {
 ; CHECK-LABEL: test_vsetvli_e8m1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vsetvli a0, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    ret
   %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 0, iXLen 0)
   ret iXLen %vl
@@ -19,7 +19,7 @@ define iXLen @test_vsetvli_e8m1(iXLen %avl) nounwind {
 define iXLen @test_vsetvli_e16mf4(iXLen %avl) nounwind {
 ; CHECK-LABEL: test_vsetvli_e16mf4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vsetvli a0, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    ret
   %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 1, iXLen 6)
   ret iXLen %vl
@@ -28,7 +28,7 @@ define iXLen @test_vsetvli_e16mf4(iXLen %avl) nounwind {
 define iXLen @test_vsetvli_e64mf8(iXLen %avl) nounwind {
 ; CHECK-LABEL: test_vsetvli_e64mf8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, a0, e64, mf8, ta, mu
+; CHECK-NEXT:    vsetvli a0, a0, e64, mf8, ta, ma
 ; CHECK-NEXT:    ret
   %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 3, iXLen 5)
   ret iXLen %vl
@@ -37,7 +37,7 @@ define iXLen @test_vsetvli_e64mf8(iXLen %avl) nounwind {
 define iXLen @test_vsetvli_e8mf2_zero_avl() nounwind {
 ; CHECK-LABEL: test_vsetvli_e8mf2_zero_avl:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli a0, 0, e8, mf2, ta, mu
+; CHECK-NEXT:    vsetivli a0, 0, e8, mf2, ta, ma
 ; CHECK-NEXT:    ret
   %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen 0, iXLen 0, iXLen 7)
   ret iXLen %vl
@@ -46,7 +46,7 @@ define iXLen @test_vsetvli_e8mf2_zero_avl() nounwind {
 define iXLen @test_vsetvli_e32mf8_zero_avl() nounwind {
 ; CHECK-LABEL: test_vsetvli_e32mf8_zero_avl:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli a0, 0, e16, mf4, ta, mu
+; CHECK-NEXT:    vsetivli a0, 0, e16, mf4, ta, ma
 ; CHECK-NEXT:    ret
   %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen 0, iXLen 1, iXLen 6)
   ret iXLen %vl
@@ -55,7 +55,7 @@ define iXLen @test_vsetvli_e32mf8_zero_avl() nounwind {
 define iXLen @test_vsetvlimax_e32m2() nounwind {
 ; CHECK-LABEL: test_vsetvlimax_e32m2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    ret
   %vl = call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 2, iXLen 1)
   ret iXLen %vl
@@ -64,7 +64,7 @@ define iXLen @test_vsetvlimax_e32m2() nounwind {
 define iXLen @test_vsetvlimax_e64m4() nounwind {
 ; CHECK-LABEL: test_vsetvlimax_e64m4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    ret
   %vl = call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 3, iXLen 2)
   ret iXLen %vl
@@ -73,7 +73,7 @@ define iXLen @test_vsetvlimax_e64m4() nounwind {
 define iXLen @test_vsetvlimax_e64m8() nounwind {
 ; CHECK-LABEL: test_vsetvlimax_e64m8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    ret
   %vl = call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 3, iXLen 3)
   ret iXLen %vl
@@ -102,7 +102,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.iXLen(<vscale x 4 x i32>, <vs
 define <vscale x 4 x i32> @redundant_vsetvli(iXLen %avl, <vscale x 4 x i32>* %ptr) nounwind {
 ; CHECK-LABEL: redundant_vsetvli:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a1)
 ; CHECK-NEXT:    ret
   %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 2, iXLen 1)
@@ -117,8 +117,8 @@ define <vscale x 4 x i32> @redundant_vsetvli(iXLen %avl, <vscale x 4 x i32>* %pt
 define <vscale x 4 x i32> @repeated_vsetvli(iXLen %avl, <vscale x 4 x i32>* %ptr) nounwind {
 ; CHECK-LABEL: repeated_vsetvli:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vsetvli a0, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a1)
 ; CHECK-NEXT:    ret
   %vl0 = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 2, iXLen 1)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down-constant-vl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1down-constant-vl-rv32.ll
index 51502fc76cf8d..95dc7d15945b8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslide1down-constant-vl-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down-constant-vl-rv32.ll
@@ -51,7 +51,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2(<vsc
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2:
 ; CHECK-64:       # %bb.0: # %entry
-; CHECK-64-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-64-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-64-NEXT:    vslide1down.vx v8, v8, a0
 ; CHECK-64-NEXT:    vslide1down.vx v8, v8, a1
 ; CHECK-64-NEXT:    ret
@@ -68,7 +68,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl3(<vscale x 1 x i64> %0, i64 %1) nounwind {
 ; CHECK-128-65536-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl3:
 ; CHECK-128-65536:       # %bb.0: # %entry
-; CHECK-128-65536-NEXT:    vsetivli a2, 3, e64, m1, ta, mu
+; CHECK-128-65536-NEXT:    vsetivli a2, 3, e64, m1, ta, ma
 ; CHECK-128-65536-NEXT:    slli a2, a2, 1
 ; CHECK-128-65536-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-128-65536-NEXT:    vslide1down.vx v8, v8, a0
@@ -84,7 +84,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl3(<vsc
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl3:
 ; CHECK-64:       # %bb.0: # %entry
-; CHECK-64-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-64-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-64-NEXT:    vslide1down.vx v8, v8, a0
 ; CHECK-64-NEXT:    vslide1down.vx v8, v8, a1
 ; CHECK-64-NEXT:    ret
@@ -101,7 +101,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl8(<vscale x 1 x i64> %0, i64 %1) nounwind {
 ; CHECK-128-65536-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl8:
 ; CHECK-128-65536:       # %bb.0: # %entry
-; CHECK-128-65536-NEXT:    vsetivli a2, 8, e64, m1, ta, mu
+; CHECK-128-65536-NEXT:    vsetivli a2, 8, e64, m1, ta, ma
 ; CHECK-128-65536-NEXT:    slli a2, a2, 1
 ; CHECK-128-65536-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-128-65536-NEXT:    vslide1down.vx v8, v8, a0
@@ -117,7 +117,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl8(<vsc
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl8:
 ; CHECK-64:       # %bb.0: # %entry
-; CHECK-64-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-64-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-64-NEXT:    vslide1down.vx v8, v8, a0
 ; CHECK-64-NEXT:    vslide1down.vx v8, v8, a1
 ; CHECK-64-NEXT:    ret
@@ -134,7 +134,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl9(<vscale x 1 x i64> %0, i64 %1) nounwind {
 ; CHECK-128-65536-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl9:
 ; CHECK-128-65536:       # %bb.0: # %entry
-; CHECK-128-65536-NEXT:    vsetivli a2, 9, e64, m1, ta, mu
+; CHECK-128-65536-NEXT:    vsetivli a2, 9, e64, m1, ta, ma
 ; CHECK-128-65536-NEXT:    slli a2, a2, 1
 ; CHECK-128-65536-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-128-65536-NEXT:    vslide1down.vx v8, v8, a0
@@ -143,7 +143,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl9(<vsc
 ;
 ; CHECK-512-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl9:
 ; CHECK-512:       # %bb.0: # %entry
-; CHECK-512-NEXT:    vsetivli a2, 9, e64, m1, ta, mu
+; CHECK-512-NEXT:    vsetivli a2, 9, e64, m1, ta, ma
 ; CHECK-512-NEXT:    slli a2, a2, 1
 ; CHECK-512-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-512-NEXT:    vslide1down.vx v8, v8, a0
@@ -152,7 +152,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl9(<vsc
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl9:
 ; CHECK-64:       # %bb.0: # %entry
-; CHECK-64-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-64-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-64-NEXT:    vslide1down.vx v8, v8, a0
 ; CHECK-64-NEXT:    vslide1down.vx v8, v8, a1
 ; CHECK-64-NEXT:    ret
@@ -169,7 +169,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl15(<vscale x 1 x i64> %0, i64 %1) nounwind {
 ; CHECK-128-65536-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl15:
 ; CHECK-128-65536:       # %bb.0: # %entry
-; CHECK-128-65536-NEXT:    vsetivli a2, 15, e64, m1, ta, mu
+; CHECK-128-65536-NEXT:    vsetivli a2, 15, e64, m1, ta, ma
 ; CHECK-128-65536-NEXT:    slli a2, a2, 1
 ; CHECK-128-65536-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-128-65536-NEXT:    vslide1down.vx v8, v8, a0
@@ -178,7 +178,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl15(<vs
 ;
 ; CHECK-512-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl15:
 ; CHECK-512:       # %bb.0: # %entry
-; CHECK-512-NEXT:    vsetivli a2, 15, e64, m1, ta, mu
+; CHECK-512-NEXT:    vsetivli a2, 15, e64, m1, ta, ma
 ; CHECK-512-NEXT:    slli a2, a2, 1
 ; CHECK-512-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-512-NEXT:    vslide1down.vx v8, v8, a0
@@ -187,7 +187,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl15(<vs
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl15:
 ; CHECK-64:       # %bb.0: # %entry
-; CHECK-64-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-64-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-64-NEXT:    vslide1down.vx v8, v8, a0
 ; CHECK-64-NEXT:    vslide1down.vx v8, v8, a1
 ; CHECK-64-NEXT:    ret
@@ -204,7 +204,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl16(<vscale x 1 x i64> %0, i64 %1) nounwind {
 ; CHECK-128-65536-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl16:
 ; CHECK-128-65536:       # %bb.0: # %entry
-; CHECK-128-65536-NEXT:    vsetivli a2, 16, e64, m1, ta, mu
+; CHECK-128-65536-NEXT:    vsetivli a2, 16, e64, m1, ta, ma
 ; CHECK-128-65536-NEXT:    slli a2, a2, 1
 ; CHECK-128-65536-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-128-65536-NEXT:    vslide1down.vx v8, v8, a0
@@ -213,14 +213,14 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl16(<vs
 ;
 ; CHECK-512-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl16:
 ; CHECK-512:       # %bb.0: # %entry
-; CHECK-512-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-512-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-512-NEXT:    vslide1down.vx v8, v8, a0
 ; CHECK-512-NEXT:    vslide1down.vx v8, v8, a1
 ; CHECK-512-NEXT:    ret
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl16:
 ; CHECK-64:       # %bb.0: # %entry
-; CHECK-64-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-64-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-64-NEXT:    vslide1down.vx v8, v8, a0
 ; CHECK-64-NEXT:    vslide1down.vx v8, v8, a1
 ; CHECK-64-NEXT:    ret
@@ -238,7 +238,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2047(<
 ; CHECK-128-65536-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2047:
 ; CHECK-128-65536:       # %bb.0: # %entry
 ; CHECK-128-65536-NEXT:    li a2, 2047
-; CHECK-128-65536-NEXT:    vsetvli a2, a2, e64, m1, ta, mu
+; CHECK-128-65536-NEXT:    vsetvli a2, a2, e64, m1, ta, ma
 ; CHECK-128-65536-NEXT:    slli a2, a2, 1
 ; CHECK-128-65536-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-128-65536-NEXT:    vslide1down.vx v8, v8, a0
@@ -247,14 +247,14 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2047(<
 ;
 ; CHECK-512-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2047:
 ; CHECK-512:       # %bb.0: # %entry
-; CHECK-512-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-512-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-512-NEXT:    vslide1down.vx v8, v8, a0
 ; CHECK-512-NEXT:    vslide1down.vx v8, v8, a1
 ; CHECK-512-NEXT:    ret
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2047:
 ; CHECK-64:       # %bb.0: # %entry
-; CHECK-64-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-64-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-64-NEXT:    vslide1down.vx v8, v8, a0
 ; CHECK-64-NEXT:    vslide1down.vx v8, v8, a1
 ; CHECK-64-NEXT:    ret
@@ -271,7 +271,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2048(<vscale x 1 x i64> %0, i64 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2048:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a1
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll
index 1edcf2cb081da..27253e28aca8d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll
@@ -856,7 +856,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a2, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli a2, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    slli a2, a2, 1
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
@@ -883,7 +883,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a3, a2, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli a3, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    slli a3, a3, 1
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m1, ta, ma
 ; CHECK-NEXT:    vslide1down.vx v9, v9, a0
@@ -911,7 +911,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a2, e64, m2, ta, mu
+; CHECK-NEXT:    vsetvli a2, a2, e64, m2, ta, ma
 ; CHECK-NEXT:    slli a2, a2, 1
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
@@ -938,7 +938,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vslide1down.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a3, a2, e64, m2, ta, mu
+; CHECK-NEXT:    vsetvli a3, a2, e64, m2, ta, ma
 ; CHECK-NEXT:    slli a3, a3, 1
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m2, ta, ma
 ; CHECK-NEXT:    vslide1down.vx v10, v10, a0
@@ -966,7 +966,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a2, e64, m4, ta, mu
+; CHECK-NEXT:    vsetvli a2, a2, e64, m4, ta, ma
 ; CHECK-NEXT:    slli a2, a2, 1
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
@@ -993,7 +993,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vslide1down.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a3, a2, e64, m4, ta, mu
+; CHECK-NEXT:    vsetvli a3, a2, e64, m4, ta, ma
 ; CHECK-NEXT:    slli a3, a3, 1
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m4, ta, ma
 ; CHECK-NEXT:    vslide1down.vx v12, v12, a0
@@ -1021,7 +1021,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a2, e64, m8, ta, mu
+; CHECK-NEXT:    vsetvli a2, a2, e64, m8, ta, ma
 ; CHECK-NEXT:    slli a2, a2, 1
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
@@ -1048,7 +1048,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vslide1down.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vslide1down_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i64_nxv8i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a3, a2, e64, m8, ta, mu
+; CHECK-NEXT:    vsetvli a3, a2, e64, m8, ta, ma
 ; CHECK-NEXT:    slli a3, a3, 1
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
 ; CHECK-NEXT:    vslide1down.vx v16, v16, a0

diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-constant-vl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-constant-vl-rv32.ll
index fe3a2096eab50..c3c3c632aa096 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslide1up-constant-vl-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-constant-vl-rv32.ll
@@ -51,7 +51,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2(<vscal
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2:
 ; CHECK-64:       # %bb.0: # %entry
-; CHECK-64-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-64-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-64-NEXT:    vslide1up.vx v9, v8, a1
 ; CHECK-64-NEXT:    vslide1up.vx v8, v9, a0
 ; CHECK-64-NEXT:    ret
@@ -68,7 +68,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl3(<vscale x 1 x i64> %0, i64 %1) nounwind {
 ; CHECK-128-65536-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl3:
 ; CHECK-128-65536:       # %bb.0: # %entry
-; CHECK-128-65536-NEXT:    vsetivli a2, 3, e64, m1, ta, mu
+; CHECK-128-65536-NEXT:    vsetivli a2, 3, e64, m1, ta, ma
 ; CHECK-128-65536-NEXT:    slli a2, a2, 1
 ; CHECK-128-65536-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-128-65536-NEXT:    vslide1up.vx v9, v8, a1
@@ -84,7 +84,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl3(<vscal
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl3:
 ; CHECK-64:       # %bb.0: # %entry
-; CHECK-64-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-64-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-64-NEXT:    vslide1up.vx v9, v8, a1
 ; CHECK-64-NEXT:    vslide1up.vx v8, v9, a0
 ; CHECK-64-NEXT:    ret
@@ -101,7 +101,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl8(<vscale x 1 x i64> %0, i64 %1) nounwind {
 ; CHECK-128-65536-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl8:
 ; CHECK-128-65536:       # %bb.0: # %entry
-; CHECK-128-65536-NEXT:    vsetivli a2, 8, e64, m1, ta, mu
+; CHECK-128-65536-NEXT:    vsetivli a2, 8, e64, m1, ta, ma
 ; CHECK-128-65536-NEXT:    slli a2, a2, 1
 ; CHECK-128-65536-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-128-65536-NEXT:    vslide1up.vx v9, v8, a1
@@ -117,7 +117,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl8(<vscal
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl8:
 ; CHECK-64:       # %bb.0: # %entry
-; CHECK-64-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-64-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-64-NEXT:    vslide1up.vx v9, v8, a1
 ; CHECK-64-NEXT:    vslide1up.vx v8, v9, a0
 ; CHECK-64-NEXT:    ret
@@ -134,7 +134,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl9(<vscale x 1 x i64> %0, i64 %1) nounwind {
 ; CHECK-128-65536-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl9:
 ; CHECK-128-65536:       # %bb.0: # %entry
-; CHECK-128-65536-NEXT:    vsetivli a2, 9, e64, m1, ta, mu
+; CHECK-128-65536-NEXT:    vsetivli a2, 9, e64, m1, ta, ma
 ; CHECK-128-65536-NEXT:    slli a2, a2, 1
 ; CHECK-128-65536-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-128-65536-NEXT:    vslide1up.vx v9, v8, a1
@@ -143,7 +143,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl9(<vscal
 ;
 ; CHECK-512-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl9:
 ; CHECK-512:       # %bb.0: # %entry
-; CHECK-512-NEXT:    vsetivli a2, 9, e64, m1, ta, mu
+; CHECK-512-NEXT:    vsetivli a2, 9, e64, m1, ta, ma
 ; CHECK-512-NEXT:    slli a2, a2, 1
 ; CHECK-512-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-512-NEXT:    vslide1up.vx v9, v8, a1
@@ -152,7 +152,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl9(<vscal
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl9:
 ; CHECK-64:       # %bb.0: # %entry
-; CHECK-64-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-64-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-64-NEXT:    vslide1up.vx v9, v8, a1
 ; CHECK-64-NEXT:    vslide1up.vx v8, v9, a0
 ; CHECK-64-NEXT:    ret
@@ -169,7 +169,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl15(<vscale x 1 x i64> %0, i64 %1) nounwind {
 ; CHECK-128-65536-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl15:
 ; CHECK-128-65536:       # %bb.0: # %entry
-; CHECK-128-65536-NEXT:    vsetivli a2, 15, e64, m1, ta, mu
+; CHECK-128-65536-NEXT:    vsetivli a2, 15, e64, m1, ta, ma
 ; CHECK-128-65536-NEXT:    slli a2, a2, 1
 ; CHECK-128-65536-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-128-65536-NEXT:    vslide1up.vx v9, v8, a1
@@ -178,7 +178,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl15(<vsca
 ;
 ; CHECK-512-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl15:
 ; CHECK-512:       # %bb.0: # %entry
-; CHECK-512-NEXT:    vsetivli a2, 15, e64, m1, ta, mu
+; CHECK-512-NEXT:    vsetivli a2, 15, e64, m1, ta, ma
 ; CHECK-512-NEXT:    slli a2, a2, 1
 ; CHECK-512-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-512-NEXT:    vslide1up.vx v9, v8, a1
@@ -187,7 +187,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl15(<vsca
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl15:
 ; CHECK-64:       # %bb.0: # %entry
-; CHECK-64-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-64-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-64-NEXT:    vslide1up.vx v9, v8, a1
 ; CHECK-64-NEXT:    vslide1up.vx v8, v9, a0
 ; CHECK-64-NEXT:    ret
@@ -204,7 +204,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl16(<vscale x 1 x i64> %0, i64 %1) nounwind {
 ; CHECK-128-65536-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl16:
 ; CHECK-128-65536:       # %bb.0: # %entry
-; CHECK-128-65536-NEXT:    vsetivli a2, 16, e64, m1, ta, mu
+; CHECK-128-65536-NEXT:    vsetivli a2, 16, e64, m1, ta, ma
 ; CHECK-128-65536-NEXT:    slli a2, a2, 1
 ; CHECK-128-65536-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-128-65536-NEXT:    vslide1up.vx v9, v8, a1
@@ -213,14 +213,14 @@ define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl16(<vsca
 ;
 ; CHECK-512-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl16:
 ; CHECK-512:       # %bb.0: # %entry
-; CHECK-512-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-512-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-512-NEXT:    vslide1up.vx v9, v8, a1
 ; CHECK-512-NEXT:    vslide1up.vx v8, v9, a0
 ; CHECK-512-NEXT:    ret
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl16:
 ; CHECK-64:       # %bb.0: # %entry
-; CHECK-64-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-64-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-64-NEXT:    vslide1up.vx v9, v8, a1
 ; CHECK-64-NEXT:    vslide1up.vx v8, v9, a0
 ; CHECK-64-NEXT:    ret
@@ -238,7 +238,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2047(<vs
 ; CHECK-128-65536-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2047:
 ; CHECK-128-65536:       # %bb.0: # %entry
 ; CHECK-128-65536-NEXT:    li a2, 2047
-; CHECK-128-65536-NEXT:    vsetvli a2, a2, e64, m1, ta, mu
+; CHECK-128-65536-NEXT:    vsetvli a2, a2, e64, m1, ta, ma
 ; CHECK-128-65536-NEXT:    slli a2, a2, 1
 ; CHECK-128-65536-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-128-65536-NEXT:    vslide1up.vx v9, v8, a1
@@ -247,14 +247,14 @@ define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2047(<vs
 ;
 ; CHECK-512-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2047:
 ; CHECK-512:       # %bb.0: # %entry
-; CHECK-512-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-512-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-512-NEXT:    vslide1up.vx v9, v8, a1
 ; CHECK-512-NEXT:    vslide1up.vx v8, v9, a0
 ; CHECK-512-NEXT:    ret
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2047:
 ; CHECK-64:       # %bb.0: # %entry
-; CHECK-64-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-64-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-64-NEXT:    vslide1up.vx v9, v8, a1
 ; CHECK-64-NEXT:    vslide1up.vx v8, v9, a0
 ; CHECK-64-NEXT:    ret
@@ -271,7 +271,7 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2048(<vscale x 1 x i64> %0, i64 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2048:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vslide1up.vx v9, v8, a1
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll
index 648d77b0ea4b8..6484dbe5fa173 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll
@@ -874,7 +874,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a2, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli a2, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    slli a2, a2, 1
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vslide1up.vx v9, v8, a1
@@ -901,7 +901,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vslide1up.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a3, a2, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli a3, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    slli a3, a3, 1
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m1, ta, ma
 ; CHECK-NEXT:    vslide1up.vx v10, v9, a1
@@ -929,7 +929,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a2, e64, m2, ta, mu
+; CHECK-NEXT:    vsetvli a2, a2, e64, m2, ta, ma
 ; CHECK-NEXT:    slli a2, a2, 1
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
 ; CHECK-NEXT:    vslide1up.vx v10, v8, a1
@@ -956,7 +956,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vslide1up.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a3, a2, e64, m2, ta, mu
+; CHECK-NEXT:    vsetvli a3, a2, e64, m2, ta, ma
 ; CHECK-NEXT:    slli a3, a3, 1
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m2, ta, ma
 ; CHECK-NEXT:    vslide1up.vx v12, v10, a1
@@ -984,7 +984,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a2, e64, m4, ta, mu
+; CHECK-NEXT:    vsetvli a2, a2, e64, m4, ta, ma
 ; CHECK-NEXT:    slli a2, a2, 1
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
 ; CHECK-NEXT:    vslide1up.vx v12, v8, a1
@@ -1011,7 +1011,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vslide1up.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a3, a2, e64, m4, ta, mu
+; CHECK-NEXT:    vsetvli a3, a2, e64, m4, ta, ma
 ; CHECK-NEXT:    slli a3, a3, 1
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m4, ta, ma
 ; CHECK-NEXT:    vslide1up.vx v16, v12, a1
@@ -1039,7 +1039,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a2, e64, m8, ta, mu
+; CHECK-NEXT:    vsetvli a2, a2, e64, m8, ta, ma
 ; CHECK-NEXT:    slli a2, a2, 1
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
 ; CHECK-NEXT:    vslide1up.vx v16, v8, a1
@@ -1066,7 +1066,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vslide1up.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a3, a2, e64, m8, ta, mu
+; CHECK-NEXT:    vsetvli a3, a2, e64, m8, ta, ma
 ; CHECK-NEXT:    slli a3, a3, 1
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
 ; CHECK-NEXT:    vslide1up.vx v24, v16, a1

diff --git a/llvm/test/Transforms/LoopStrengthReduce/RISCV/lsr-drop-solution.ll b/llvm/test/Transforms/LoopStrengthReduce/RISCV/lsr-drop-solution.ll
index f62f14140509f..f0d22c9a33b01 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/RISCV/lsr-drop-solution.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/RISCV/lsr-drop-solution.ll
@@ -7,7 +7,7 @@ target triple = "riscv64-unknown-linux-gnu"
 define ptr @foo(ptr %a0, ptr %a1, i64 %a2) {
 ; CHECK-LABEL: foo:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a4, a2, e8, m8, ta, mu
+; CHECK-NEXT:    vsetvli a4, a2, e8, m8, ta, ma
 ; CHECK-NEXT:    bne a4, a2, .LBB0_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a3, a0
@@ -29,7 +29,7 @@ define ptr @foo(ptr %a0, ptr %a1, i64 %a2) {
 ; CHECK-NEXT:    bltu a3, a5, .LBB0_3
 ; CHECK-NEXT:  # %bb.4: # %do.end
 ; CHECK-NEXT:    sub a2, a2, a3
-; CHECK-NEXT:    vsetvli a2, a2, e8, m8, ta, mu
+; CHECK-NEXT:    vsetvli a2, a2, e8, m8, ta, ma
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
 ; CHECK-NEXT:    vle8.v v8, (a1)
 ; CHECK-NEXT:    vse8.v v8, (a3)
