[llvm] df7ab6a - [RISCV] Add ANDI to getRegAllocationHints.

Craig Topper via llvm-commits llvm-commits@lists.llvm.org
Wed Nov 30 21:02:32 PST 2022


Author: Craig Topper
Date: 2022-11-30T20:59:02-08:00
New Revision: df7ab6a52e302b63837a34a15e3f35455fea2929

URL: https://github.com/llvm/llvm-project/commit/df7ab6a52e302b63837a34a15e3f35455fea2929
DIFF: https://github.com/llvm/llvm-project/commit/df7ab6a52e302b63837a34a15e3f35455fea2929.diff

LOG: [RISCV] Add ANDI to getRegAllocationHints.

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
    llvm/test/CodeGen/RISCV/bittest.ll
    llvm/test/CodeGen/RISCV/branch.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
    llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
    llvm/test/CodeGen/RISCV/select-optimize-multiple.ll
    llvm/test/CodeGen/RISCV/setcc-logic.ll
    llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
    llvm/test/CodeGen/RISCV/urem-vector-lkk.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index f059853268630..14191f9874791 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -446,6 +446,9 @@ bool RISCVRegisterInfo::getRegAllocationHints(
     switch (MI.getOpcode()) {
     default:
       return false;
+    case RISCV::ANDI:
+      NeedGPRC = true;
+      return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm());
     case RISCV::SRAI:
     case RISCV::SRLI:
       NeedGPRC = true;
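
For context: getRegAllocationHints lets the backend suggest registers to the allocator. C.ANDI, the compressed form of ANDI, requires the destination to equal the first source, that register to be in the GPRC class (x8-x15), and a 6-bit signed immediate; the new case therefore requests a GPRC register and hints the allocator toward reusing the source register whenever the immediate fits in simm6 (the isInt<6> check above). The test updates below show the resulting register reuse; a simplified before/after sketch of the effect on one of them (comments added for illustration, not compiler output):

    # Old output: the mask test writes a0, a different register than the
    # source a2, so only the 4-byte andi encoding is possible.
    andi    a0, a2, 2
    bnez    a0, .LBB1_4

    # New output with the hint: destination == source and the immediate
    # fits in simm6, so the instruction can be emitted as the 2-byte
    # c.andi when the C extension is enabled.
    andi    a2, a2, 2
    bnez    a2, .LBB1_4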

diff --git a/llvm/test/CodeGen/RISCV/bittest.ll b/llvm/test/CodeGen/RISCV/bittest.ll
index 73dc9f0e0a757..c952c37229006 100644
--- a/llvm/test/CodeGen/RISCV/bittest.ll
+++ b/llvm/test/CodeGen/RISCV/bittest.ll
@@ -1524,8 +1524,8 @@ define void @bit_31_nz_branch_i64(i64 %0) {
 define void @bit_32_z_branch_i64(i64 %0) {
 ; RV32-LABEL: bit_32_z_branch_i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    andi a0, a1, 1
-; RV32-NEXT:    bnez a0, .LBB53_2
+; RV32-NEXT:    andi a1, a1, 1
+; RV32-NEXT:    bnez a1, .LBB53_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    tail bar@plt
 ; RV32-NEXT:  .LBB53_2:
@@ -1554,8 +1554,8 @@ define void @bit_32_z_branch_i64(i64 %0) {
 define void @bit_32_nz_branch_i64(i64 %0) {
 ; RV32-LABEL: bit_32_nz_branch_i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    andi a0, a1, 1
-; RV32-NEXT:    beqz a0, .LBB54_2
+; RV32-NEXT:    andi a1, a1, 1
+; RV32-NEXT:    beqz a1, .LBB54_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    tail bar@plt
 ; RV32-NEXT:  .LBB54_2:

diff --git a/llvm/test/CodeGen/RISCV/branch.ll b/llvm/test/CodeGen/RISCV/branch.ll
index dca90d9e889a6..d2f3e15b5c058 100644
--- a/llvm/test/CodeGen/RISCV/branch.ll
+++ b/llvm/test/CodeGen/RISCV/branch.ll
@@ -36,8 +36,8 @@ define void @foo(i32 %a, i32 *%b, i1 %c) nounwind {
 ; RV32I-NEXT:    bgeu a0, a3, .LBB0_14
 ; RV32I-NEXT:  # %bb.10: # %test11
 ; RV32I-NEXT:    lw a0, 0(a1)
-; RV32I-NEXT:    andi a0, a2, 1
-; RV32I-NEXT:    bnez a0, .LBB0_14
+; RV32I-NEXT:    andi a2, a2, 1
+; RV32I-NEXT:    bnez a2, .LBB0_14
 ; RV32I-NEXT:  # %bb.11: # %test12
 ; RV32I-NEXT:    lw a0, 0(a1)
 ; RV32I-NEXT:    bgez a0, .LBB0_14

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index 84a1d41c7ed4b..818bef4bf9045 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -80,16 +80,16 @@ define <2 x i8> @mgather_v2i8(<2 x i8*> %ptrs, <2 x i1> %m, <2 x i8> %passthru)
 ; RV64ZVE32F-NEXT:    andi a3, a2, 1
 ; RV64ZVE32F-NEXT:    bnez a3, .LBB1_3
 ; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    bnez a0, .LBB1_4
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB1_4
 ; RV64ZVE32F-NEXT:  .LBB1_2: # %else2
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB1_3: # %cond.load
 ; RV64ZVE32F-NEXT:    lb a0, 0(a0)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e8, mf4, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v8, a0
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB1_2
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB1_2
 ; RV64ZVE32F-NEXT:  .LBB1_4: # %cond.load1
 ; RV64ZVE32F-NEXT:    lb a0, 0(a1)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
@@ -137,8 +137,8 @@ define <2 x i16> @mgather_v2i8_sextload_v2i16(<2 x i8*> %ptrs, <2 x i1> %m, <2 x
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e8, mf4, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v8, a0
 ; RV64ZVE32F-NEXT:  .LBB2_2: # %else
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB2_4
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB2_4
 ; RV64ZVE32F-NEXT:  # %bb.3: # %cond.load1
 ; RV64ZVE32F-NEXT:    lb a0, 0(a1)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
@@ -191,8 +191,8 @@ define <2 x i16> @mgather_v2i8_zextload_v2i16(<2 x i8*> %ptrs, <2 x i1> %m, <2 x
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e8, mf4, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v8, a0
 ; RV64ZVE32F-NEXT:  .LBB3_2: # %else
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB3_4
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB3_4
 ; RV64ZVE32F-NEXT:  # %bb.3: # %cond.load1
 ; RV64ZVE32F-NEXT:    lb a0, 0(a1)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
@@ -245,8 +245,8 @@ define <2 x i32> @mgather_v2i8_sextload_v2i32(<2 x i8*> %ptrs, <2 x i1> %m, <2 x
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e8, mf4, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v8, a0
 ; RV64ZVE32F-NEXT:  .LBB4_2: # %else
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB4_4
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB4_4
 ; RV64ZVE32F-NEXT:  # %bb.3: # %cond.load1
 ; RV64ZVE32F-NEXT:    lb a0, 0(a1)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
@@ -299,8 +299,8 @@ define <2 x i32> @mgather_v2i8_zextload_v2i32(<2 x i8*> %ptrs, <2 x i1> %m, <2 x
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e8, mf4, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v8, a0
 ; RV64ZVE32F-NEXT:  .LBB5_2: # %else
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB5_4
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB5_4
 ; RV64ZVE32F-NEXT:  # %bb.3: # %cond.load1
 ; RV64ZVE32F-NEXT:    lb a0, 0(a1)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
@@ -361,8 +361,8 @@ define <2 x i64> @mgather_v2i8_sextload_v2i64(<2 x i8*> %ptrs, <2 x i1> %m, <2 x
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e8, mf4, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v8, a0
 ; RV64ZVE32F-NEXT:  .LBB6_2: # %else
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB6_4
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB6_4
 ; RV64ZVE32F-NEXT:  # %bb.3: # %cond.load1
 ; RV64ZVE32F-NEXT:    lb a0, 0(a1)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
@@ -424,8 +424,8 @@ define <2 x i64> @mgather_v2i8_zextload_v2i64(<2 x i8*> %ptrs, <2 x i1> %m, <2 x
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e8, mf4, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v8, a0
 ; RV64ZVE32F-NEXT:  .LBB7_2: # %else
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB7_4
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB7_4
 ; RV64ZVE32F-NEXT:  # %bb.3: # %cond.load1
 ; RV64ZVE32F-NEXT:    lb a0, 0(a1)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
@@ -930,16 +930,16 @@ define <2 x i16> @mgather_v2i16(<2 x i16*> %ptrs, <2 x i1> %m, <2 x i16> %passth
 ; RV64ZVE32F-NEXT:    andi a3, a2, 1
 ; RV64ZVE32F-NEXT:    bnez a3, .LBB14_3
 ; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    bnez a0, .LBB14_4
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB14_4
 ; RV64ZVE32F-NEXT:  .LBB14_2: # %else2
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB14_3: # %cond.load
 ; RV64ZVE32F-NEXT:    lh a0, 0(a0)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, mf2, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v8, a0
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB14_2
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB14_2
 ; RV64ZVE32F-NEXT:  .LBB14_4: # %cond.load1
 ; RV64ZVE32F-NEXT:    lh a0, 0(a1)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, mf2, ta, ma
@@ -987,8 +987,8 @@ define <2 x i32> @mgather_v2i16_sextload_v2i32(<2 x i16*> %ptrs, <2 x i1> %m, <2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, mf2, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v8, a0
 ; RV64ZVE32F-NEXT:  .LBB15_2: # %else
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB15_4
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB15_4
 ; RV64ZVE32F-NEXT:  # %bb.3: # %cond.load1
 ; RV64ZVE32F-NEXT:    lh a0, 0(a1)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, mf2, ta, ma
@@ -1041,8 +1041,8 @@ define <2 x i32> @mgather_v2i16_zextload_v2i32(<2 x i16*> %ptrs, <2 x i1> %m, <2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, mf2, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v8, a0
 ; RV64ZVE32F-NEXT:  .LBB16_2: # %else
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB16_4
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB16_4
 ; RV64ZVE32F-NEXT:  # %bb.3: # %cond.load1
 ; RV64ZVE32F-NEXT:    lh a0, 0(a1)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, mf2, ta, ma
@@ -1103,8 +1103,8 @@ define <2 x i64> @mgather_v2i16_sextload_v2i64(<2 x i16*> %ptrs, <2 x i1> %m, <2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, mf2, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v8, a0
 ; RV64ZVE32F-NEXT:  .LBB17_2: # %else
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB17_4
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB17_4
 ; RV64ZVE32F-NEXT:  # %bb.3: # %cond.load1
 ; RV64ZVE32F-NEXT:    lh a0, 0(a1)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, mf2, ta, ma
@@ -1168,8 +1168,8 @@ define <2 x i64> @mgather_v2i16_zextload_v2i64(<2 x i16*> %ptrs, <2 x i1> %m, <2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, mf2, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v8, a0
 ; RV64ZVE32F-NEXT:  .LBB18_2: # %else
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB18_4
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB18_4
 ; RV64ZVE32F-NEXT:  # %bb.3: # %cond.load1
 ; RV64ZVE32F-NEXT:    lh a0, 0(a1)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, mf2, ta, ma
@@ -2148,16 +2148,16 @@ define <2 x i32> @mgather_v2i32(<2 x i32*> %ptrs, <2 x i1> %m, <2 x i32> %passth
 ; RV64ZVE32F-NEXT:    andi a3, a2, 1
 ; RV64ZVE32F-NEXT:    bnez a3, .LBB28_3
 ; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    bnez a0, .LBB28_4
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB28_4
 ; RV64ZVE32F-NEXT:  .LBB28_2: # %else2
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB28_3: # %cond.load
 ; RV64ZVE32F-NEXT:    lw a0, 0(a0)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v8, a0
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB28_2
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB28_2
 ; RV64ZVE32F-NEXT:  .LBB28_4: # %cond.load1
 ; RV64ZVE32F-NEXT:    lw a0, 0(a1)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
@@ -2214,8 +2214,8 @@ define <2 x i64> @mgather_v2i32_sextload_v2i64(<2 x i32*> %ptrs, <2 x i1> %m, <2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v8, a0
 ; RV64ZVE32F-NEXT:  .LBB29_2: # %else
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB29_4
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB29_4
 ; RV64ZVE32F-NEXT:  # %bb.3: # %cond.load1
 ; RV64ZVE32F-NEXT:    lw a0, 0(a1)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
@@ -2274,8 +2274,8 @@ define <2 x i64> @mgather_v2i32_zextload_v2i64(<2 x i32*> %ptrs, <2 x i1> %m, <2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v8, a0
 ; RV64ZVE32F-NEXT:  .LBB30_2: # %else
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB30_4
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB30_4
 ; RV64ZVE32F-NEXT:  # %bb.3: # %cond.load1
 ; RV64ZVE32F-NEXT:    lw a0, 0(a1)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
@@ -3750,8 +3750,8 @@ define <2 x i64> @mgather_v2i64(<2 x i64*> %ptrs, <2 x i1> %m, <2 x i64> %passth
 ; RV64ZVE32F-NEXT:  # %bb.1: # %cond.load
 ; RV64ZVE32F-NEXT:    ld a2, 0(a0)
 ; RV64ZVE32F-NEXT:  .LBB43_2: # %else
-; RV64ZVE32F-NEXT:    andi a0, a4, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB43_4
+; RV64ZVE32F-NEXT:    andi a4, a4, 2
+; RV64ZVE32F-NEXT:    beqz a4, .LBB43_4
 ; RV64ZVE32F-NEXT:  # %bb.3: # %cond.load1
 ; RV64ZVE32F-NEXT:    ld a3, 0(a1)
 ; RV64ZVE32F-NEXT:  .LBB43_4: # %else2
@@ -7237,16 +7237,16 @@ define <2 x half> @mgather_v2f16(<2 x half*> %ptrs, <2 x i1> %m, <2 x half> %pas
 ; RV64ZVE32F-NEXT:    andi a3, a2, 1
 ; RV64ZVE32F-NEXT:    bnez a3, .LBB59_3
 ; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    bnez a0, .LBB59_4
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB59_4
 ; RV64ZVE32F-NEXT:  .LBB59_2: # %else2
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB59_3: # %cond.load
 ; RV64ZVE32F-NEXT:    flh ft0, 0(a0)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, mf2, tu, ma
 ; RV64ZVE32F-NEXT:    vfmv.s.f v8, ft0
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB59_2
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB59_2
 ; RV64ZVE32F-NEXT:  .LBB59_4: # %cond.load1
 ; RV64ZVE32F-NEXT:    flh ft0, 0(a1)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, mf2, ta, ma
@@ -8214,16 +8214,16 @@ define <2 x float> @mgather_v2f32(<2 x float*> %ptrs, <2 x i1> %m, <2 x float> %
 ; RV64ZVE32F-NEXT:    andi a3, a2, 1
 ; RV64ZVE32F-NEXT:    bnez a3, .LBB69_3
 ; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    bnez a0, .LBB69_4
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB69_4
 ; RV64ZVE32F-NEXT:  .LBB69_2: # %else2
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB69_3: # %cond.load
 ; RV64ZVE32F-NEXT:    flw ft0, 0(a0)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vfmv.s.f v8, ft0
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB69_2
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB69_2
 ; RV64ZVE32F-NEXT:  .LBB69_4: # %cond.load1
 ; RV64ZVE32F-NEXT:    flw ft0, 0(a1)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
@@ -9673,14 +9673,14 @@ define <2 x double> @mgather_v2f64(<2 x double*> %ptrs, <2 x i1> %m, <2 x double
 ; RV64ZVE32F-NEXT:    andi a3, a2, 1
 ; RV64ZVE32F-NEXT:    bnez a3, .LBB82_3
 ; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    bnez a0, .LBB82_4
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB82_4
 ; RV64ZVE32F-NEXT:  .LBB82_2: # %else2
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB82_3: # %cond.load
 ; RV64ZVE32F-NEXT:    fld fa0, 0(a0)
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB82_2
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB82_2
 ; RV64ZVE32F-NEXT:  .LBB82_4: # %cond.load1
 ; RV64ZVE32F-NEXT:    fld fa1, 0(a1)
 ; RV64ZVE32F-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
index e75067c262822..3a512bc5f221c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
@@ -74,15 +74,15 @@ define void @mscatter_v2i8(<2 x i8> %val, <2 x i8*> %ptrs, <2 x i1> %m) {
 ; RV64ZVE32F-NEXT:    andi a3, a2, 1
 ; RV64ZVE32F-NEXT:    bnez a3, .LBB1_3
 ; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    bnez a0, .LBB1_4
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB1_4
 ; RV64ZVE32F-NEXT:  .LBB1_2: # %else2
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB1_3: # %cond.store
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vse8.v v8, (a0)
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB1_2
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB1_2
 ; RV64ZVE32F-NEXT:  .LBB1_4: # %cond.store1
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 1
@@ -122,15 +122,15 @@ define void @mscatter_v2i16_truncstore_v2i8(<2 x i16> %val, <2 x i8*> %ptrs, <2
 ; RV64ZVE32F-NEXT:    vnsrl.wi v8, v8, 0
 ; RV64ZVE32F-NEXT:    bnez a3, .LBB2_3
 ; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    bnez a0, .LBB2_4
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB2_4
 ; RV64ZVE32F-NEXT:  .LBB2_2: # %else2
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB2_3: # %cond.store
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vse8.v v8, (a0)
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB2_2
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB2_2
 ; RV64ZVE32F-NEXT:  .LBB2_4: # %cond.store1
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 1
@@ -179,15 +179,15 @@ define void @mscatter_v2i32_truncstore_v2i8(<2 x i32> %val, <2 x i8*> %ptrs, <2
 ; RV64ZVE32F-NEXT:    vnsrl.wi v8, v8, 0
 ; RV64ZVE32F-NEXT:    bnez a3, .LBB3_3
 ; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    bnez a0, .LBB3_4
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB3_4
 ; RV64ZVE32F-NEXT:  .LBB3_2: # %else2
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB3_3: # %cond.store
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vse8.v v8, (a0)
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB3_2
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB3_2
 ; RV64ZVE32F-NEXT:  .LBB3_4: # %cond.store1
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 1
@@ -309,8 +309,8 @@ define void @mscatter_v4i8(<4 x i8> %val, <4 x i8*> %ptrs, <4 x i1> %m) {
 ; RV64ZVE32F-NEXT:    andi a0, a3, 4
 ; RV64ZVE32F-NEXT:    bnez a0, .LBB5_7
 ; RV64ZVE32F-NEXT:  .LBB5_3: # %else4
-; RV64ZVE32F-NEXT:    andi a0, a3, 8
-; RV64ZVE32F-NEXT:    bnez a0, .LBB5_8
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    bnez a3, .LBB5_8
 ; RV64ZVE32F-NEXT:  .LBB5_4: # %else6
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB5_5: # %cond.store
@@ -329,8 +329,8 @@ define void @mscatter_v4i8(<4 x i8> %val, <4 x i8*> %ptrs, <4 x i1> %m) {
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
 ; RV64ZVE32F-NEXT:    vse8.v v9, (a2)
-; RV64ZVE32F-NEXT:    andi a0, a3, 8
-; RV64ZVE32F-NEXT:    beqz a0, .LBB5_4
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    beqz a3, .LBB5_4
 ; RV64ZVE32F-NEXT:  .LBB5_8: # %cond.store5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 3
@@ -369,8 +369,8 @@ define void @mscatter_truemask_v4i8(<4 x i8> %val, <4 x i8*> %ptrs) {
 ; RV64ZVE32F-NEXT:    andi a0, a3, 4
 ; RV64ZVE32F-NEXT:    bnez a0, .LBB6_7
 ; RV64ZVE32F-NEXT:  .LBB6_3: # %else4
-; RV64ZVE32F-NEXT:    andi a0, a3, 8
-; RV64ZVE32F-NEXT:    bnez a0, .LBB6_8
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    bnez a3, .LBB6_8
 ; RV64ZVE32F-NEXT:  .LBB6_4: # %else6
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB6_5: # %cond.store
@@ -389,8 +389,8 @@ define void @mscatter_truemask_v4i8(<4 x i8> %val, <4 x i8*> %ptrs) {
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
 ; RV64ZVE32F-NEXT:    vse8.v v9, (a2)
-; RV64ZVE32F-NEXT:    andi a0, a3, 8
-; RV64ZVE32F-NEXT:    beqz a0, .LBB6_4
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    beqz a3, .LBB6_4
 ; RV64ZVE32F-NEXT:  .LBB6_8: # %cond.store5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 3
@@ -699,15 +699,15 @@ define void @mscatter_v2i16(<2 x i16> %val, <2 x i16*> %ptrs, <2 x i1> %m) {
 ; RV64ZVE32F-NEXT:    andi a3, a2, 1
 ; RV64ZVE32F-NEXT:    bnez a3, .LBB11_3
 ; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    bnez a0, .LBB11_4
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB11_4
 ; RV64ZVE32F-NEXT:  .LBB11_2: # %else2
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB11_3: # %cond.store
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    vse16.v v8, (a0)
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB11_2
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB11_2
 ; RV64ZVE32F-NEXT:  .LBB11_4: # %cond.store1
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 1
@@ -748,15 +748,15 @@ define void @mscatter_v2i32_truncstore_v2i16(<2 x i32> %val, <2 x i16*> %ptrs, <
 ; RV64ZVE32F-NEXT:    andi a3, a2, 1
 ; RV64ZVE32F-NEXT:    bnez a3, .LBB12_3
 ; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    bnez a0, .LBB12_4
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB12_4
 ; RV64ZVE32F-NEXT:  .LBB12_2: # %else2
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB12_3: # %cond.store
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    vse16.v v8, (a0)
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB12_2
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB12_2
 ; RV64ZVE32F-NEXT:  .LBB12_4: # %cond.store1
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 1
@@ -875,8 +875,8 @@ define void @mscatter_v4i16(<4 x i16> %val, <4 x i16*> %ptrs, <4 x i1> %m) {
 ; RV64ZVE32F-NEXT:    andi a0, a3, 4
 ; RV64ZVE32F-NEXT:    bnez a0, .LBB14_7
 ; RV64ZVE32F-NEXT:  .LBB14_3: # %else4
-; RV64ZVE32F-NEXT:    andi a0, a3, 8
-; RV64ZVE32F-NEXT:    bnez a0, .LBB14_8
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    bnez a3, .LBB14_8
 ; RV64ZVE32F-NEXT:  .LBB14_4: # %else6
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB14_5: # %cond.store
@@ -895,8 +895,8 @@ define void @mscatter_v4i16(<4 x i16> %val, <4 x i16*> %ptrs, <4 x i1> %m) {
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
 ; RV64ZVE32F-NEXT:    vse16.v v9, (a2)
-; RV64ZVE32F-NEXT:    andi a0, a3, 8
-; RV64ZVE32F-NEXT:    beqz a0, .LBB14_4
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    beqz a3, .LBB14_4
 ; RV64ZVE32F-NEXT:  .LBB14_8: # %cond.store5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 3
@@ -935,8 +935,8 @@ define void @mscatter_truemask_v4i16(<4 x i16> %val, <4 x i16*> %ptrs) {
 ; RV64ZVE32F-NEXT:    andi a0, a3, 4
 ; RV64ZVE32F-NEXT:    bnez a0, .LBB15_7
 ; RV64ZVE32F-NEXT:  .LBB15_3: # %else4
-; RV64ZVE32F-NEXT:    andi a0, a3, 8
-; RV64ZVE32F-NEXT:    bnez a0, .LBB15_8
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    bnez a3, .LBB15_8
 ; RV64ZVE32F-NEXT:  .LBB15_4: # %else6
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB15_5: # %cond.store
@@ -955,8 +955,8 @@ define void @mscatter_truemask_v4i16(<4 x i16> %val, <4 x i16*> %ptrs) {
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
 ; RV64ZVE32F-NEXT:    vse16.v v9, (a2)
-; RV64ZVE32F-NEXT:    andi a0, a3, 8
-; RV64ZVE32F-NEXT:    beqz a0, .LBB15_4
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    beqz a3, .LBB15_4
 ; RV64ZVE32F-NEXT:  .LBB15_8: # %cond.store5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 3
@@ -1679,15 +1679,15 @@ define void @mscatter_v2i32(<2 x i32> %val, <2 x i32*> %ptrs, <2 x i1> %m) {
 ; RV64ZVE32F-NEXT:    andi a3, a2, 1
 ; RV64ZVE32F-NEXT:    bnez a3, .LBB23_3
 ; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    bnez a0, .LBB23_4
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB23_4
 ; RV64ZVE32F-NEXT:  .LBB23_2: # %else2
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB23_3: # %cond.store
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vse32.v v8, (a0)
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB23_2
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB23_2
 ; RV64ZVE32F-NEXT:  .LBB23_4: # %cond.store1
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 1
@@ -1784,8 +1784,8 @@ define void @mscatter_v4i32(<4 x i32> %val, <4 x i32*> %ptrs, <4 x i1> %m) {
 ; RV64ZVE32F-NEXT:    andi a0, a3, 4
 ; RV64ZVE32F-NEXT:    bnez a0, .LBB25_7
 ; RV64ZVE32F-NEXT:  .LBB25_3: # %else4
-; RV64ZVE32F-NEXT:    andi a0, a3, 8
-; RV64ZVE32F-NEXT:    bnez a0, .LBB25_8
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    bnez a3, .LBB25_8
 ; RV64ZVE32F-NEXT:  .LBB25_4: # %else6
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB25_5: # %cond.store
@@ -1804,8 +1804,8 @@ define void @mscatter_v4i32(<4 x i32> %val, <4 x i32*> %ptrs, <4 x i1> %m) {
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
 ; RV64ZVE32F-NEXT:    vse32.v v9, (a2)
-; RV64ZVE32F-NEXT:    andi a0, a3, 8
-; RV64ZVE32F-NEXT:    beqz a0, .LBB25_4
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    beqz a3, .LBB25_4
 ; RV64ZVE32F-NEXT:  .LBB25_8: # %cond.store5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 3
@@ -1844,8 +1844,8 @@ define void @mscatter_truemask_v4i32(<4 x i32> %val, <4 x i32*> %ptrs) {
 ; RV64ZVE32F-NEXT:    andi a0, a3, 4
 ; RV64ZVE32F-NEXT:    bnez a0, .LBB26_7
 ; RV64ZVE32F-NEXT:  .LBB26_3: # %else4
-; RV64ZVE32F-NEXT:    andi a0, a3, 8
-; RV64ZVE32F-NEXT:    bnez a0, .LBB26_8
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    bnez a3, .LBB26_8
 ; RV64ZVE32F-NEXT:  .LBB26_4: # %else6
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB26_5: # %cond.store
@@ -1864,8 +1864,8 @@ define void @mscatter_truemask_v4i32(<4 x i32> %val, <4 x i32*> %ptrs) {
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
 ; RV64ZVE32F-NEXT:    vse32.v v9, (a2)
-; RV64ZVE32F-NEXT:    andi a0, a3, 8
-; RV64ZVE32F-NEXT:    beqz a0, .LBB26_4
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    beqz a3, .LBB26_4
 ; RV64ZVE32F-NEXT:  .LBB26_8: # %cond.store5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 3
@@ -2995,8 +2995,8 @@ define void @mscatter_v2i64(<2 x i64> %val, <2 x i64*> %ptrs, <2 x i1> %m) {
 ; RV32ZVE32F-NEXT:    andi a4, a3, 1
 ; RV32ZVE32F-NEXT:    bnez a4, .LBB37_3
 ; RV32ZVE32F-NEXT:  # %bb.1: # %else
-; RV32ZVE32F-NEXT:    andi a0, a3, 2
-; RV32ZVE32F-NEXT:    bnez a0, .LBB37_4
+; RV32ZVE32F-NEXT:    andi a3, a3, 2
+; RV32ZVE32F-NEXT:    bnez a3, .LBB37_4
 ; RV32ZVE32F-NEXT:  .LBB37_2: # %else2
 ; RV32ZVE32F-NEXT:    ret
 ; RV32ZVE32F-NEXT:  .LBB37_3: # %cond.store
@@ -3006,8 +3006,8 @@ define void @mscatter_v2i64(<2 x i64> %val, <2 x i64*> %ptrs, <2 x i1> %m) {
 ; RV32ZVE32F-NEXT:    vmv.x.s a5, v8
 ; RV32ZVE32F-NEXT:    sw a4, 4(a5)
 ; RV32ZVE32F-NEXT:    sw a0, 0(a5)
-; RV32ZVE32F-NEXT:    andi a0, a3, 2
-; RV32ZVE32F-NEXT:    beqz a0, .LBB37_2
+; RV32ZVE32F-NEXT:    andi a3, a3, 2
+; RV32ZVE32F-NEXT:    beqz a3, .LBB37_2
 ; RV32ZVE32F-NEXT:  .LBB37_4: # %cond.store1
 ; RV32ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32ZVE32F-NEXT:    vslidedown.vi v8, v8, 1
@@ -3023,14 +3023,14 @@ define void @mscatter_v2i64(<2 x i64> %val, <2 x i64*> %ptrs, <2 x i1> %m) {
 ; RV64ZVE32F-NEXT:    andi a5, a4, 1
 ; RV64ZVE32F-NEXT:    bnez a5, .LBB37_3
 ; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a0, a4, 2
-; RV64ZVE32F-NEXT:    bnez a0, .LBB37_4
+; RV64ZVE32F-NEXT:    andi a4, a4, 2
+; RV64ZVE32F-NEXT:    bnez a4, .LBB37_4
 ; RV64ZVE32F-NEXT:  .LBB37_2: # %else2
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB37_3: # %cond.store
 ; RV64ZVE32F-NEXT:    sd a0, 0(a2)
-; RV64ZVE32F-NEXT:    andi a0, a4, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB37_2
+; RV64ZVE32F-NEXT:    andi a4, a4, 2
+; RV64ZVE32F-NEXT:    beqz a4, .LBB37_2
 ; RV64ZVE32F-NEXT:  .LBB37_4: # %cond.store1
 ; RV64ZVE32F-NEXT:    sd a1, 0(a3)
 ; RV64ZVE32F-NEXT:    ret
@@ -3072,8 +3072,8 @@ define void @mscatter_v4i64(<4 x i64> %val, <4 x i64*> %ptrs, <4 x i1> %m) {
 ; RV32ZVE32F-NEXT:    andi a0, a5, 4
 ; RV32ZVE32F-NEXT:    bnez a0, .LBB38_7
 ; RV32ZVE32F-NEXT:  .LBB38_3: # %else4
-; RV32ZVE32F-NEXT:    andi a0, a5, 8
-; RV32ZVE32F-NEXT:    bnez a0, .LBB38_8
+; RV32ZVE32F-NEXT:    andi a5, a5, 8
+; RV32ZVE32F-NEXT:    bnez a5, .LBB38_8
 ; RV32ZVE32F-NEXT:  .LBB38_4: # %else6
 ; RV32ZVE32F-NEXT:    ret
 ; RV32ZVE32F-NEXT:  .LBB38_5: # %cond.store
@@ -3099,8 +3099,8 @@ define void @mscatter_v4i64(<4 x i64> %val, <4 x i64*> %ptrs, <4 x i1> %m) {
 ; RV32ZVE32F-NEXT:    vmv.x.s a0, v9
 ; RV32ZVE32F-NEXT:    sw a4, 0(a0)
 ; RV32ZVE32F-NEXT:    sw a3, 4(a0)
-; RV32ZVE32F-NEXT:    andi a0, a5, 8
-; RV32ZVE32F-NEXT:    beqz a0, .LBB38_4
+; RV32ZVE32F-NEXT:    andi a5, a5, 8
+; RV32ZVE32F-NEXT:    beqz a5, .LBB38_4
 ; RV32ZVE32F-NEXT:  .LBB38_8: # %cond.store5
 ; RV32ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32ZVE32F-NEXT:    vslidedown.vi v8, v8, 3
@@ -3185,8 +3185,8 @@ define void @mscatter_truemask_v4i64(<4 x i64> %val, <4 x i64*> %ptrs) {
 ; RV32ZVE32F-NEXT:    andi a0, a5, 4
 ; RV32ZVE32F-NEXT:    bnez a0, .LBB39_7
 ; RV32ZVE32F-NEXT:  .LBB39_3: # %else4
-; RV32ZVE32F-NEXT:    andi a0, a5, 8
-; RV32ZVE32F-NEXT:    bnez a0, .LBB39_8
+; RV32ZVE32F-NEXT:    andi a5, a5, 8
+; RV32ZVE32F-NEXT:    bnez a5, .LBB39_8
 ; RV32ZVE32F-NEXT:  .LBB39_4: # %else6
 ; RV32ZVE32F-NEXT:    ret
 ; RV32ZVE32F-NEXT:  .LBB39_5: # %cond.store
@@ -3212,8 +3212,8 @@ define void @mscatter_truemask_v4i64(<4 x i64> %val, <4 x i64*> %ptrs) {
 ; RV32ZVE32F-NEXT:    vmv.x.s a0, v9
 ; RV32ZVE32F-NEXT:    sw a4, 0(a0)
 ; RV32ZVE32F-NEXT:    sw a3, 4(a0)
-; RV32ZVE32F-NEXT:    andi a0, a5, 8
-; RV32ZVE32F-NEXT:    beqz a0, .LBB39_4
+; RV32ZVE32F-NEXT:    andi a5, a5, 8
+; RV32ZVE32F-NEXT:    beqz a5, .LBB39_4
 ; RV32ZVE32F-NEXT:  .LBB39_8: # %cond.store5
 ; RV32ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32ZVE32F-NEXT:    vslidedown.vi v8, v8, 3
@@ -6131,15 +6131,15 @@ define void @mscatter_v2f16(<2 x half> %val, <2 x half*> %ptrs, <2 x i1> %m) {
 ; RV64ZVE32F-NEXT:    andi a3, a2, 1
 ; RV64ZVE32F-NEXT:    bnez a3, .LBB53_3
 ; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    bnez a0, .LBB53_4
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB53_4
 ; RV64ZVE32F-NEXT:  .LBB53_2: # %else2
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB53_3: # %cond.store
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    vse16.v v8, (a0)
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB53_2
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB53_2
 ; RV64ZVE32F-NEXT:  .LBB53_4: # %cond.store1
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 1
@@ -6180,8 +6180,8 @@ define void @mscatter_v4f16(<4 x half> %val, <4 x half*> %ptrs, <4 x i1> %m) {
 ; RV64ZVE32F-NEXT:    andi a0, a3, 4
 ; RV64ZVE32F-NEXT:    bnez a0, .LBB54_7
 ; RV64ZVE32F-NEXT:  .LBB54_3: # %else4
-; RV64ZVE32F-NEXT:    andi a0, a3, 8
-; RV64ZVE32F-NEXT:    bnez a0, .LBB54_8
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    bnez a3, .LBB54_8
 ; RV64ZVE32F-NEXT:  .LBB54_4: # %else6
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB54_5: # %cond.store
@@ -6200,8 +6200,8 @@ define void @mscatter_v4f16(<4 x half> %val, <4 x half*> %ptrs, <4 x i1> %m) {
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
 ; RV64ZVE32F-NEXT:    vse16.v v9, (a2)
-; RV64ZVE32F-NEXT:    andi a0, a3, 8
-; RV64ZVE32F-NEXT:    beqz a0, .LBB54_4
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    beqz a3, .LBB54_4
 ; RV64ZVE32F-NEXT:  .LBB54_8: # %cond.store5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 3
@@ -6240,8 +6240,8 @@ define void @mscatter_truemask_v4f16(<4 x half> %val, <4 x half*> %ptrs) {
 ; RV64ZVE32F-NEXT:    andi a0, a3, 4
 ; RV64ZVE32F-NEXT:    bnez a0, .LBB55_7
 ; RV64ZVE32F-NEXT:  .LBB55_3: # %else4
-; RV64ZVE32F-NEXT:    andi a0, a3, 8
-; RV64ZVE32F-NEXT:    bnez a0, .LBB55_8
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    bnez a3, .LBB55_8
 ; RV64ZVE32F-NEXT:  .LBB55_4: # %else6
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB55_5: # %cond.store
@@ -6260,8 +6260,8 @@ define void @mscatter_truemask_v4f16(<4 x half> %val, <4 x half*> %ptrs) {
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
 ; RV64ZVE32F-NEXT:    vse16.v v9, (a2)
-; RV64ZVE32F-NEXT:    andi a0, a3, 8
-; RV64ZVE32F-NEXT:    beqz a0, .LBB55_4
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    beqz a3, .LBB55_4
 ; RV64ZVE32F-NEXT:  .LBB55_8: # %cond.store5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 3
@@ -6984,15 +6984,15 @@ define void @mscatter_v2f32(<2 x float> %val, <2 x float*> %ptrs, <2 x i1> %m) {
 ; RV64ZVE32F-NEXT:    andi a3, a2, 1
 ; RV64ZVE32F-NEXT:    bnez a3, .LBB63_3
 ; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    bnez a0, .LBB63_4
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB63_4
 ; RV64ZVE32F-NEXT:  .LBB63_2: # %else2
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB63_3: # %cond.store
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vse32.v v8, (a0)
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB63_2
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB63_2
 ; RV64ZVE32F-NEXT:  .LBB63_4: # %cond.store1
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 1
@@ -7033,8 +7033,8 @@ define void @mscatter_v4f32(<4 x float> %val, <4 x float*> %ptrs, <4 x i1> %m) {
 ; RV64ZVE32F-NEXT:    andi a0, a3, 4
 ; RV64ZVE32F-NEXT:    bnez a0, .LBB64_7
 ; RV64ZVE32F-NEXT:  .LBB64_3: # %else4
-; RV64ZVE32F-NEXT:    andi a0, a3, 8
-; RV64ZVE32F-NEXT:    bnez a0, .LBB64_8
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    bnez a3, .LBB64_8
 ; RV64ZVE32F-NEXT:  .LBB64_4: # %else6
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB64_5: # %cond.store
@@ -7053,8 +7053,8 @@ define void @mscatter_v4f32(<4 x float> %val, <4 x float*> %ptrs, <4 x i1> %m) {
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
 ; RV64ZVE32F-NEXT:    vse32.v v9, (a2)
-; RV64ZVE32F-NEXT:    andi a0, a3, 8
-; RV64ZVE32F-NEXT:    beqz a0, .LBB64_4
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    beqz a3, .LBB64_4
 ; RV64ZVE32F-NEXT:  .LBB64_8: # %cond.store5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 3
@@ -7093,8 +7093,8 @@ define void @mscatter_truemask_v4f32(<4 x float> %val, <4 x float*> %ptrs) {
 ; RV64ZVE32F-NEXT:    andi a0, a3, 4
 ; RV64ZVE32F-NEXT:    bnez a0, .LBB65_7
 ; RV64ZVE32F-NEXT:  .LBB65_3: # %else4
-; RV64ZVE32F-NEXT:    andi a0, a3, 8
-; RV64ZVE32F-NEXT:    bnez a0, .LBB65_8
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    bnez a3, .LBB65_8
 ; RV64ZVE32F-NEXT:  .LBB65_4: # %else6
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB65_5: # %cond.store
@@ -7113,8 +7113,8 @@ define void @mscatter_truemask_v4f32(<4 x float> %val, <4 x float*> %ptrs) {
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
 ; RV64ZVE32F-NEXT:    vse32.v v9, (a2)
-; RV64ZVE32F-NEXT:    andi a0, a3, 8
-; RV64ZVE32F-NEXT:    beqz a0, .LBB65_4
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    beqz a3, .LBB65_4
 ; RV64ZVE32F-NEXT:  .LBB65_8: # %cond.store5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 3
@@ -8265,14 +8265,14 @@ define void @mscatter_v2f64(<2 x double> %val, <2 x double*> %ptrs, <2 x i1> %m)
 ; RV64ZVE32F-NEXT:    andi a3, a2, 1
 ; RV64ZVE32F-NEXT:    bnez a3, .LBB76_3
 ; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    bnez a0, .LBB76_4
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB76_4
 ; RV64ZVE32F-NEXT:  .LBB76_2: # %else2
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB76_3: # %cond.store
 ; RV64ZVE32F-NEXT:    fsd fa0, 0(a0)
-; RV64ZVE32F-NEXT:    andi a0, a2, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB76_2
+; RV64ZVE32F-NEXT:    andi a2, a2, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB76_2
 ; RV64ZVE32F-NEXT:  .LBB76_4: # %cond.store1
 ; RV64ZVE32F-NEXT:    fsd fa1, 0(a1)
 ; RV64ZVE32F-NEXT:    ret
@@ -8355,8 +8355,8 @@ define void @mscatter_v4f64(<4 x double> %val, <4 x double*> %ptrs, <4 x i1> %m)
 ; RV64ZVE32F-NEXT:    andi a0, a3, 4
 ; RV64ZVE32F-NEXT:    bnez a0, .LBB77_7
 ; RV64ZVE32F-NEXT:  .LBB77_3: # %else4
-; RV64ZVE32F-NEXT:    andi a0, a3, 8
-; RV64ZVE32F-NEXT:    bnez a0, .LBB77_8
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    bnez a3, .LBB77_8
 ; RV64ZVE32F-NEXT:  .LBB77_4: # %else6
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB77_5: # %cond.store
@@ -8370,8 +8370,8 @@ define void @mscatter_v4f64(<4 x double> %val, <4 x double*> %ptrs, <4 x i1> %m)
 ; RV64ZVE32F-NEXT:    beqz a0, .LBB77_3
 ; RV64ZVE32F-NEXT:  .LBB77_7: # %cond.store3
 ; RV64ZVE32F-NEXT:    fsd fa2, 0(a2)
-; RV64ZVE32F-NEXT:    andi a0, a3, 8
-; RV64ZVE32F-NEXT:    beqz a0, .LBB77_4
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    beqz a3, .LBB77_4
 ; RV64ZVE32F-NEXT:  .LBB77_8: # %cond.store5
 ; RV64ZVE32F-NEXT:    fsd fa3, 0(a1)
 ; RV64ZVE32F-NEXT:    ret
@@ -8452,8 +8452,8 @@ define void @mscatter_truemask_v4f64(<4 x double> %val, <4 x double*> %ptrs) {
 ; RV64ZVE32F-NEXT:    andi a0, a3, 4
 ; RV64ZVE32F-NEXT:    bnez a0, .LBB78_7
 ; RV64ZVE32F-NEXT:  .LBB78_3: # %else4
-; RV64ZVE32F-NEXT:    andi a0, a3, 8
-; RV64ZVE32F-NEXT:    bnez a0, .LBB78_8
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    bnez a3, .LBB78_8
 ; RV64ZVE32F-NEXT:  .LBB78_4: # %else6
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB78_5: # %cond.store
@@ -8467,8 +8467,8 @@ define void @mscatter_truemask_v4f64(<4 x double> %val, <4 x double*> %ptrs) {
 ; RV64ZVE32F-NEXT:    beqz a0, .LBB78_3
 ; RV64ZVE32F-NEXT:  .LBB78_7: # %cond.store3
 ; RV64ZVE32F-NEXT:    fsd fa2, 0(a2)
-; RV64ZVE32F-NEXT:    andi a0, a3, 8
-; RV64ZVE32F-NEXT:    beqz a0, .LBB78_4
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    beqz a3, .LBB78_4
 ; RV64ZVE32F-NEXT:  .LBB78_8: # %cond.store5
 ; RV64ZVE32F-NEXT:    fsd fa3, 0(a1)
 ; RV64ZVE32F-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
index 6dc46ae1d9389..18b7393b97d57 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -185,15 +185,15 @@ define <vscale x 1 x double> @test5(i64 %avl, i8 zeroext %cond, <vscale x 1 x do
 ; CHECK-NEXT:    bnez a2, .LBB4_3
 ; CHECK-NEXT:  # %bb.1: # %if.else
 ; CHECK-NEXT:    vfsub.vv v9, v8, v9
-; CHECK-NEXT:    andi a0, a1, 2
-; CHECK-NEXT:    beqz a0, .LBB4_4
+; CHECK-NEXT:    andi a1, a1, 2
+; CHECK-NEXT:    beqz a1, .LBB4_4
 ; CHECK-NEXT:  .LBB4_2: # %if.then4
 ; CHECK-NEXT:    vfmul.vv v8, v9, v8
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB4_3: # %if.then
 ; CHECK-NEXT:    vfadd.vv v9, v8, v9
-; CHECK-NEXT:    andi a0, a1, 2
-; CHECK-NEXT:    bnez a0, .LBB4_2
+; CHECK-NEXT:    andi a1, a1, 2
+; CHECK-NEXT:    bnez a1, .LBB4_2
 ; CHECK-NEXT:  .LBB4_4: # %if.else5
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
@@ -504,8 +504,8 @@ define <vscale x 2 x i32> @test_vsetvli_x0_x0(<vscale x 2 x i32>* %x, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vle32.v v9, (a0)
-; CHECK-NEXT:    andi a0, a3, 1
-; CHECK-NEXT:    beqz a0, .LBB9_2
+; CHECK-NEXT:    andi a3, a3, 1
+; CHECK-NEXT:    beqz a3, .LBB9_2
 ; CHECK-NEXT:  # %bb.1: # %if
 ; CHECK-NEXT:    vle16.v v10, (a1)
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
@@ -543,15 +543,15 @@ define <vscale x 2 x i32> @test_vsetvli_x0_x0_2(<vscale x 2 x i32>* %x, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m1, ta, ma
 ; CHECK-NEXT:    vle32.v v9, (a0)
-; CHECK-NEXT:    andi a0, a4, 1
-; CHECK-NEXT:    beqz a0, .LBB10_2
+; CHECK-NEXT:    andi a4, a4, 1
+; CHECK-NEXT:    beqz a4, .LBB10_2
 ; CHECK-NEXT:  # %bb.1: # %if
 ; CHECK-NEXT:    vle16.v v10, (a1)
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vwadd.wv v9, v9, v10
 ; CHECK-NEXT:  .LBB10_2: # %if.end
-; CHECK-NEXT:    andi a0, a5, 1
-; CHECK-NEXT:    beqz a0, .LBB10_4
+; CHECK-NEXT:    andi a5, a5, 1
+; CHECK-NEXT:    beqz a5, .LBB10_4
 ; CHECK-NEXT:  # %bb.3: # %if2
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vle16.v v10, (a2)
@@ -900,8 +900,8 @@ define <vscale x 2 x i32> @test_ratio_only_vmv_s_x(<vscale x 2 x i32>* %x, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    andi a0, a2, 1
-; CHECK-NEXT:    beqz a0, .LBB20_2
+; CHECK-NEXT:    andi a2, a2, 1
+; CHECK-NEXT:    beqz a2, .LBB20_2
 ; CHECK-NEXT:  # %bb.1: # %if
 ; CHECK-NEXT:    vle16.v v9, (a1)
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
@@ -930,8 +930,8 @@ define <vscale x 2 x i32> @test_ratio_only_vmv_s_x2(<vscale x 2 x i32>* %x, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vle16.v v9, (a1)
-; CHECK-NEXT:    andi a1, a2, 1
-; CHECK-NEXT:    beqz a1, .LBB21_2
+; CHECK-NEXT:    andi a2, a2, 1
+; CHECK-NEXT:    beqz a2, .LBB21_2
 ; CHECK-NEXT:  # %bb.1: # %if
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    j .LBB21_3

diff --git a/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll b/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll
index 0219ab1f241b6..8d5c11113f308 100644
--- a/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll
+++ b/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll
@@ -305,17 +305,17 @@ entry:
 define i32 @cmovdiffcc(i1 %a, i1 %b, i32 %c, i32 %d, i32 %e, i32 %f) nounwind {
 ; RV32I-LABEL: cmovdiffcc:
 ; RV32I:       # %bb.0: # %entry
-; RV32I-NEXT:    andi a6, a0, 1
-; RV32I-NEXT:    andi a0, a1, 1
-; RV32I-NEXT:    beqz a6, .LBB7_3
+; RV32I-NEXT:    andi a0, a0, 1
+; RV32I-NEXT:    andi a1, a1, 1
+; RV32I-NEXT:    beqz a0, .LBB7_3
 ; RV32I-NEXT:  # %bb.1: # %entry
-; RV32I-NEXT:    beqz a0, .LBB7_4
+; RV32I-NEXT:    beqz a1, .LBB7_4
 ; RV32I-NEXT:  .LBB7_2: # %entry
 ; RV32I-NEXT:    add a0, a2, a4
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB7_3: # %entry
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bnez a0, .LBB7_2
+; RV32I-NEXT:    bnez a1, .LBB7_2
 ; RV32I-NEXT:  .LBB7_4: # %entry
 ; RV32I-NEXT:    mv a4, a5
 ; RV32I-NEXT:    add a0, a2, a4
@@ -323,17 +323,17 @@ define i32 @cmovdiffcc(i1 %a, i1 %b, i32 %c, i32 %d, i32 %e, i32 %f) nounwind {
 ;
 ; RV64I-LABEL: cmovdiffcc:
 ; RV64I:       # %bb.0: # %entry
-; RV64I-NEXT:    andi a6, a0, 1
-; RV64I-NEXT:    andi a0, a1, 1
-; RV64I-NEXT:    beqz a6, .LBB7_3
+; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    andi a1, a1, 1
+; RV64I-NEXT:    beqz a0, .LBB7_3
 ; RV64I-NEXT:  # %bb.1: # %entry
-; RV64I-NEXT:    beqz a0, .LBB7_4
+; RV64I-NEXT:    beqz a1, .LBB7_4
 ; RV64I-NEXT:  .LBB7_2: # %entry
 ; RV64I-NEXT:    addw a0, a2, a4
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB7_3: # %entry
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bnez a0, .LBB7_2
+; RV64I-NEXT:    bnez a1, .LBB7_2
 ; RV64I-NEXT:  .LBB7_4: # %entry
 ; RV64I-NEXT:    mv a4, a5
 ; RV64I-NEXT:    addw a0, a2, a4

diff --git a/llvm/test/CodeGen/RISCV/setcc-logic.ll b/llvm/test/CodeGen/RISCV/setcc-logic.ll
index 2e75b02e27b90..3e822b88e6c6b 100644
--- a/llvm/test/CodeGen/RISCV/setcc-logic.ll
+++ b/llvm/test/CodeGen/RISCV/setcc-logic.ll
@@ -55,8 +55,8 @@ define i1 @or_icmps_const_1bit_diff(i64 %x) nounwind {
 ; RV32I-NEXT:    sltu a0, a2, a0
 ; RV32I-NEXT:    add a0, a1, a0
 ; RV32I-NEXT:    addi a0, a0, -1
-; RV32I-NEXT:    andi a1, a2, -5
-; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    andi a2, a2, -5
+; RV32I-NEXT:    or a0, a2, a0
 ; RV32I-NEXT:    seqz a0, a0
 ; RV32I-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
index 56f0fc2eaea2e..6495238b4b2f1 100644
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -363,9 +363,9 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV32-NEXT:    or a0, a1, a0
 ; RV32-NEXT:    sw a0, 4(s0)
 ; RV32-NEXT:    srli a0, a3, 31
-; RV32-NEXT:    andi a1, a3, 1
-; RV32-NEXT:    slli a1, a1, 1
-; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    andi a3, a3, 1
+; RV32-NEXT:    slli a3, a3, 1
+; RV32-NEXT:    or a0, a0, a3
 ; RV32-NEXT:    slli a2, a2, 2
 ; RV32-NEXT:    or a0, a0, a2
 ; RV32-NEXT:    sw a0, 8(s0)
@@ -513,9 +513,9 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV32M-NEXT:    or a0, a1, a0
 ; RV32M-NEXT:    sw a0, 4(s0)
 ; RV32M-NEXT:    srli a0, a3, 31
-; RV32M-NEXT:    andi a1, a3, 1
-; RV32M-NEXT:    slli a1, a1, 1
-; RV32M-NEXT:    or a0, a0, a1
+; RV32M-NEXT:    andi a3, a3, 1
+; RV32M-NEXT:    slli a3, a3, 1
+; RV32M-NEXT:    or a0, a0, a3
 ; RV32M-NEXT:    slli a2, a2, 2
 ; RV32M-NEXT:    or a0, a0, a2
 ; RV32M-NEXT:    sw a0, 8(s0)

diff --git a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
index cd0c4efc41fce..0113bd5d0d26c 100644
--- a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
@@ -622,9 +622,9 @@ define <4 x i16> @dont_fold_urem_power_of_two(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    andi a1, s3, 63
 ; RV32I-NEXT:    andi a2, s2, 31
-; RV32I-NEXT:    andi a3, s1, 7
+; RV32I-NEXT:    andi s1, s1, 7
 ; RV32I-NEXT:    sh a0, 6(s0)
-; RV32I-NEXT:    sh a3, 4(s0)
+; RV32I-NEXT:    sh s1, 4(s0)
 ; RV32I-NEXT:    sh a2, 2(s0)
 ; RV32I-NEXT:    sh a1, 0(s0)
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -678,9 +678,9 @@ define <4 x i16> @dont_fold_urem_power_of_two(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    andi a1, s3, 63
 ; RV64I-NEXT:    andi a2, s2, 31
-; RV64I-NEXT:    andi a3, s1, 7
+; RV64I-NEXT:    andi s1, s1, 7
 ; RV64I-NEXT:    sh a0, 6(s0)
-; RV64I-NEXT:    sh a3, 4(s0)
+; RV64I-NEXT:    sh s1, 4(s0)
 ; RV64I-NEXT:    sh a2, 2(s0)
 ; RV64I-NEXT:    sh a1, 0(s0)
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
@@ -708,10 +708,10 @@ define <4 x i16> @dont_fold_urem_power_of_two(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    mulw a3, a3, a6
 ; RV64IM-NEXT:    subw a2, a2, a3
 ; RV64IM-NEXT:    andi a1, a1, 63
-; RV64IM-NEXT:    andi a3, a5, 31
+; RV64IM-NEXT:    andi a5, a5, 31
 ; RV64IM-NEXT:    andi a4, a4, 7
 ; RV64IM-NEXT:    sh a4, 4(a0)
-; RV64IM-NEXT:    sh a3, 2(a0)
+; RV64IM-NEXT:    sh a5, 2(a0)
 ; RV64IM-NEXT:    sh a1, 0(a0)
 ; RV64IM-NEXT:    sh a2, 6(a0)
 ; RV64IM-NEXT:    ret


        

