[llvm] [RISCV] Enable TTI::shouldDropLSRSolutionIfLessProfitable by default (PR #89927)

Alex Bradbury via llvm-commits llvm-commits at lists.llvm.org
Wed Apr 24 06:35:17 PDT 2024


https://github.com/asb created https://github.com/llvm/llvm-project/pull/89927

This avoids some cases where LSR produces results that lead to very poor codegen. There's a chance we'll see minor degradations for some inputs, in cases where our metrics say the found solution is worse but in reality it is better than the starting point.

---

Stacks on https://github.com/llvm/llvm-project/pull/89924

Not tested enough yet to realistically merge at this point, but posting early in case others have looked at this already. Here is an example input that has terrible codegen without this:

```llvm
%struct.ham = type { i64, i32, i32, i32, i32, i32, i32, i32, i32, i64, i32, i64, i64, i32, i64 }

define i32 @main() {
bb:
  %call = tail call ptr null(i64 0)
  br label %bb2

bb1:                                              ; No predecessors!
  %load = load i32, ptr %call, align 4
  ret i32 0

bb2:                                              ; preds = %bb2, %bb
  %phi = phi i64 [ 0, %bb ], [ %add, %bb2 ]
  %getelementptr = getelementptr %struct.ham, ptr %call, i64 %phi
  %getelementptr3 = getelementptr i8, ptr %getelementptr, i64 8
  store i32 0, ptr %getelementptr3, align 8
  %getelementptr4 = getelementptr i8, ptr %getelementptr, i64 12
  store i32 0, ptr %getelementptr4, align 4
  %getelementptr5 = getelementptr i8, ptr %getelementptr, i64 16
  store i32 0, ptr %getelementptr5, align 8
  %getelementptr6 = getelementptr i8, ptr %getelementptr, i64 20
  store i32 0, ptr %getelementptr6, align 4
  %getelementptr7 = getelementptr i8, ptr %getelementptr, i64 24
  store i32 0, ptr %getelementptr7, align 8
  %getelementptr8 = getelementptr i8, ptr %getelementptr, i64 28
  store i32 0, ptr %getelementptr8, align 4
  %getelementptr9 = getelementptr i8, ptr %getelementptr, i64 32
  store i32 0, ptr %getelementptr9, align 8
  %getelementptr10 = getelementptr i8, ptr %getelementptr, i64 36
  store i32 0, ptr %getelementptr10, align 4
  %getelementptr11 = getelementptr i8, ptr %getelementptr, i64 40
  store i64 0, ptr %getelementptr11, align 8
  %getelementptr12 = getelementptr i8, ptr %getelementptr, i64 48
  store i32 0, ptr %getelementptr12, align 8
  %getelementptr13 = getelementptr i8, ptr %getelementptr, i64 72
  store i32 0, ptr %getelementptr13, align 8
  %getelementptr14 = getelementptr i8, ptr %getelementptr, i64 80
  store i64 0, ptr %getelementptr14, align 8
  %add = add i64 %phi, 1
  br label %bb2
}
```

There's probably an LSR change to be made related to its search heuristic, but finding a solution that by LSR's metrics is much worse than the starting point and then going ahead with it anyway seems hard to justify.

>From 0e3aa08128c9ac00931483850e66b1dbcc94d30b Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Wed, 24 Apr 2024 14:09:17 +0100
Subject: [PATCH 1/2] [LSR] Provide TTI hook to enable dropping solutions
 deemed to be unprofitable

<https://reviews.llvm.org/D126043> introduced a flag to drop solutions
if deemed unprofitable. As noted there, introducing a TTI hook enables
backends to individually opt into this behaviour.
---
 .../llvm/Analysis/TargetTransformInfo.h        |  8 ++++++++
 .../llvm/Analysis/TargetTransformInfoImpl.h    |  2 ++
 llvm/include/llvm/CodeGen/BasicTTIImpl.h       |  4 ++++
 llvm/lib/Analysis/TargetTransformInfo.cpp      |  4 ++++
 .../Transforms/Scalar/LoopStrengthReduce.cpp   | 18 +++++++++++++++---
 5 files changed, 33 insertions(+), 3 deletions(-)

diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index 58c69ac939763a..7849da9606b259 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -740,6 +740,10 @@ class TargetTransformInfo {
   /// When successful, makes the primary IV dead.
   bool shouldFoldTerminatingConditionAfterLSR() const;
 
+  /// Return true if LSR should drop a found solution if it's calculated to be
+  /// less profitable than the baseline.
+  bool shouldDropLSRSolutionIfLessProfitable() const;
+
   /// \returns true if LSR should not optimize a chain that includes \p I.
   bool isProfitableLSRChainElement(Instruction *I) const;
 
@@ -1861,6 +1865,7 @@ class TargetTransformInfo::Concept {
                              const TargetTransformInfo::LSRCost &C2) = 0;
   virtual bool isNumRegsMajorCostOfLSR() = 0;
   virtual bool shouldFoldTerminatingConditionAfterLSR() const = 0;
+  virtual bool shouldDropLSRSolutionIfLessProfitable() const = 0;
   virtual bool isProfitableLSRChainElement(Instruction *I) = 0;
   virtual bool canMacroFuseCmp() = 0;
   virtual bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
@@ -2333,6 +2338,9 @@ class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
   bool shouldFoldTerminatingConditionAfterLSR() const override {
     return Impl.shouldFoldTerminatingConditionAfterLSR();
   }
+  bool shouldDropLSRSolutionIfLessProfitable() const override {
+    return Impl.shouldDropLSRSolutionIfLessProfitable();
+  }
   bool isProfitableLSRChainElement(Instruction *I) override {
     return Impl.isProfitableLSRChainElement(I);
   }
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index 4d5cd963e09267..221e851ba23c01 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -240,6 +240,8 @@ class TargetTransformInfoImplBase {
 
   bool shouldFoldTerminatingConditionAfterLSR() const { return false; }
 
+  bool shouldDropLSRSolutionIfLessProfitable() const { return false; }
+
   bool isProfitableLSRChainElement(Instruction *I) const { return false; }
 
   bool canMacroFuseCmp() const { return false; }
diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
index 06a19c75cf873a..cfa43709e58e10 100644
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -398,6 +398,10 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
         shouldFoldTerminatingConditionAfterLSR();
   }
 
+  bool shouldDropLSRSolutionIfLessProfitable() const {
+    return TargetTransformInfoImplBase::shouldDropLSRSolutionIfLessProfitable();
+  }
+
   bool isProfitableLSRChainElement(Instruction *I) {
     return TargetTransformInfoImplBase::isProfitableLSRChainElement(I);
   }
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index 33c899fe889990..a8f258ab14561e 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -427,6 +427,10 @@ bool TargetTransformInfo::shouldFoldTerminatingConditionAfterLSR() const {
   return TTIImpl->shouldFoldTerminatingConditionAfterLSR();
 }
 
+bool TargetTransformInfo::shouldDropLSRSolutionIfLessProfitable() const {
+  return TTIImpl->shouldDropLSRSolutionIfLessProfitable();
+}
+
 bool TargetTransformInfo::isProfitableLSRChainElement(Instruction *I) const {
   return TTIImpl->isProfitableLSRChainElement(I);
 }
diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index ec42e2d6e193a6..1384ecfce373c7 100644
--- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -193,8 +193,8 @@ static cl::opt<cl::boolOrDefault> AllowTerminatingConditionFoldingAfterLSR(
     "lsr-term-fold", cl::Hidden,
     cl::desc("Attempt to replace primary IV with other IV."));
 
-static cl::opt<bool> AllowDropSolutionIfLessProfitable(
-    "lsr-drop-solution", cl::Hidden, cl::init(false),
+static cl::opt<cl::boolOrDefault> AllowDropSolutionIfLessProfitable(
+    "lsr-drop-solution", cl::Hidden,
     cl::desc("Attempt to drop solution if it is less profitable"));
 
 STATISTIC(NumTermFold,
@@ -5248,10 +5248,22 @@ void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const {
 
   assert(Solution.size() == Uses.size() && "Malformed solution!");
 
+  const bool EnableDropUnprofitableSolution = [&] {
+    switch (AllowDropSolutionIfLessProfitable) {
+    case cl::BOU_TRUE:
+      return true;
+    case cl::BOU_FALSE:
+      return false;
+    case cl::BOU_UNSET:
+      return TTI.shouldDropLSRSolutionIfLessProfitable();
+    }
+    llvm_unreachable("Unhandled cl::boolOrDefault enum");
+  }();
+
   if (BaselineCost.isLess(SolutionCost)) {
     LLVM_DEBUG(dbgs() << "The baseline solution requires ";
                BaselineCost.print(dbgs()); dbgs() << "\n");
-    if (!AllowDropSolutionIfLessProfitable)
+    if (!EnableDropUnprofitableSolution)
       LLVM_DEBUG(
           dbgs() << "Baseline is more profitable than chosen solution, "
                     "add option 'lsr-drop-solution' to drop LSR solution.\n");

>From f1a1777a31ac83906f258e3f1ea2b824a1439319 Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Wed, 24 Apr 2024 14:22:33 +0100
Subject: [PATCH 2/2] [RISCV] Enable TTI::shouldDropLSRSolutionIfLessProfitable
 by default

This avoids some cases where LSR produces results that lead to very poor
codegen. There's a chance we'll see minor degradations for some inputs
in the case that our metrics say the found solution is worse, but in
reality it's better than the starting point.
---
 .../Target/RISCV/RISCVTargetTransformInfo.h   |   2 +
 .../RISCV/rvv/dont-sink-splat-operands.ll     | 129 +++--
 .../CodeGen/RISCV/rvv/sink-splat-operands.ll  | 474 +++++++++---------
 .../RISCV/rvv/vsetvli-insert-crossbb.ll       |  36 +-
 4 files changed, 312 insertions(+), 329 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
index 2f9281ab892447..fa3a88a700226f 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
@@ -397,6 +397,8 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
   bool shouldFoldTerminatingConditionAfterLSR() const {
     return true;
   }
+
+  bool shouldDropLSRSolutionIfLessProfitable() const { return true; }
 };
 
 } // end namespace llvm
diff --git a/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll
index dc4d28819bbbd8..92639be0017e8f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll
@@ -86,30 +86,29 @@ declare i64 @llvm.vscale.i64()
 define void @sink_splat_add_scalable(ptr nocapture %a, i32 signext %x) {
 ; NO-SINK-LABEL: sink_splat_add_scalable:
 ; NO-SINK:       # %bb.0: # %entry
-; NO-SINK-NEXT:    csrr a5, vlenb
-; NO-SINK-NEXT:    srli a2, a5, 1
+; NO-SINK-NEXT:    csrr a2, vlenb
+; NO-SINK-NEXT:    srli a2, a2, 1
 ; NO-SINK-NEXT:    li a3, 1024
 ; NO-SINK-NEXT:    bgeu a3, a2, .LBB1_2
 ; NO-SINK-NEXT:  # %bb.1:
 ; NO-SINK-NEXT:    li a3, 0
 ; NO-SINK-NEXT:    j .LBB1_5
 ; NO-SINK-NEXT:  .LBB1_2: # %vector.ph
+; NO-SINK-NEXT:    li a5, 0
 ; NO-SINK-NEXT:    addi a3, a2, -1
 ; NO-SINK-NEXT:    andi a4, a3, 1024
 ; NO-SINK-NEXT:    xori a3, a4, 1024
 ; NO-SINK-NEXT:    vsetvli a6, zero, e32, m2, ta, ma
 ; NO-SINK-NEXT:    vmv.v.x v8, a1
-; NO-SINK-NEXT:    slli a5, a5, 1
-; NO-SINK-NEXT:    mv a6, a0
-; NO-SINK-NEXT:    mv a7, a3
 ; NO-SINK-NEXT:  .LBB1_3: # %vector.body
 ; NO-SINK-NEXT:    # =>This Inner Loop Header: Depth=1
+; NO-SINK-NEXT:    slli a6, a5, 2
+; NO-SINK-NEXT:    add a6, a0, a6
 ; NO-SINK-NEXT:    vl2re32.v v10, (a6)
 ; NO-SINK-NEXT:    vadd.vv v10, v10, v8
+; NO-SINK-NEXT:    add a5, a5, a2
 ; NO-SINK-NEXT:    vs2r.v v10, (a6)
-; NO-SINK-NEXT:    sub a7, a7, a2
-; NO-SINK-NEXT:    add a6, a6, a5
-; NO-SINK-NEXT:    bnez a7, .LBB1_3
+; NO-SINK-NEXT:    bne a5, a3, .LBB1_3
 ; NO-SINK-NEXT:  # %bb.4: # %middle.block
 ; NO-SINK-NEXT:    beqz a4, .LBB1_7
 ; NO-SINK-NEXT:  .LBB1_5: # %for.body.preheader
@@ -129,29 +128,28 @@ define void @sink_splat_add_scalable(ptr nocapture %a, i32 signext %x) {
 ;
 ; SINK-LABEL: sink_splat_add_scalable:
 ; SINK:       # %bb.0: # %entry
-; SINK-NEXT:    csrr a5, vlenb
-; SINK-NEXT:    srli a2, a5, 1
+; SINK-NEXT:    csrr a2, vlenb
+; SINK-NEXT:    srli a2, a2, 1
 ; SINK-NEXT:    li a3, 1024
 ; SINK-NEXT:    bgeu a3, a2, .LBB1_2
 ; SINK-NEXT:  # %bb.1:
 ; SINK-NEXT:    li a3, 0
 ; SINK-NEXT:    j .LBB1_5
 ; SINK-NEXT:  .LBB1_2: # %vector.ph
+; SINK-NEXT:    li a5, 0
 ; SINK-NEXT:    addi a3, a2, -1
 ; SINK-NEXT:    andi a4, a3, 1024
 ; SINK-NEXT:    xori a3, a4, 1024
-; SINK-NEXT:    slli a5, a5, 1
 ; SINK-NEXT:    vsetvli a6, zero, e32, m2, ta, ma
-; SINK-NEXT:    mv a6, a0
-; SINK-NEXT:    mv a7, a3
 ; SINK-NEXT:  .LBB1_3: # %vector.body
 ; SINK-NEXT:    # =>This Inner Loop Header: Depth=1
+; SINK-NEXT:    slli a6, a5, 2
+; SINK-NEXT:    add a6, a0, a6
 ; SINK-NEXT:    vl2re32.v v8, (a6)
 ; SINK-NEXT:    vadd.vx v8, v8, a1
+; SINK-NEXT:    add a5, a5, a2
 ; SINK-NEXT:    vs2r.v v8, (a6)
-; SINK-NEXT:    sub a7, a7, a2
-; SINK-NEXT:    add a6, a6, a5
-; SINK-NEXT:    bnez a7, .LBB1_3
+; SINK-NEXT:    bne a5, a3, .LBB1_3
 ; SINK-NEXT:  # %bb.4: # %middle.block
 ; SINK-NEXT:    beqz a4, .LBB1_7
 ; SINK-NEXT:  .LBB1_5: # %for.body.preheader
@@ -171,29 +169,28 @@ define void @sink_splat_add_scalable(ptr nocapture %a, i32 signext %x) {
 ;
 ; DEFAULT-LABEL: sink_splat_add_scalable:
 ; DEFAULT:       # %bb.0: # %entry
-; DEFAULT-NEXT:    csrr a5, vlenb
-; DEFAULT-NEXT:    srli a2, a5, 1
+; DEFAULT-NEXT:    csrr a2, vlenb
+; DEFAULT-NEXT:    srli a2, a2, 1
 ; DEFAULT-NEXT:    li a3, 1024
 ; DEFAULT-NEXT:    bgeu a3, a2, .LBB1_2
 ; DEFAULT-NEXT:  # %bb.1:
 ; DEFAULT-NEXT:    li a3, 0
 ; DEFAULT-NEXT:    j .LBB1_5
 ; DEFAULT-NEXT:  .LBB1_2: # %vector.ph
+; DEFAULT-NEXT:    li a5, 0
 ; DEFAULT-NEXT:    addi a3, a2, -1
 ; DEFAULT-NEXT:    andi a4, a3, 1024
 ; DEFAULT-NEXT:    xori a3, a4, 1024
-; DEFAULT-NEXT:    slli a5, a5, 1
 ; DEFAULT-NEXT:    vsetvli a6, zero, e32, m2, ta, ma
-; DEFAULT-NEXT:    mv a6, a0
-; DEFAULT-NEXT:    mv a7, a3
 ; DEFAULT-NEXT:  .LBB1_3: # %vector.body
 ; DEFAULT-NEXT:    # =>This Inner Loop Header: Depth=1
+; DEFAULT-NEXT:    slli a6, a5, 2
+; DEFAULT-NEXT:    add a6, a0, a6
 ; DEFAULT-NEXT:    vl2re32.v v8, (a6)
 ; DEFAULT-NEXT:    vadd.vx v8, v8, a1
+; DEFAULT-NEXT:    add a5, a5, a2
 ; DEFAULT-NEXT:    vs2r.v v8, (a6)
-; DEFAULT-NEXT:    sub a7, a7, a2
-; DEFAULT-NEXT:    add a6, a6, a5
-; DEFAULT-NEXT:    bnez a7, .LBB1_3
+; DEFAULT-NEXT:    bne a5, a3, .LBB1_3
 ; DEFAULT-NEXT:  # %bb.4: # %middle.block
 ; DEFAULT-NEXT:    beqz a4, .LBB1_7
 ; DEFAULT-NEXT:  .LBB1_5: # %for.body.preheader
@@ -407,32 +404,32 @@ define void @sink_splat_fadd_scalable(ptr nocapture %a, float %x) {
 ; NO-SINK-LABEL: sink_splat_fadd_scalable:
 ; NO-SINK:       # %bb.0: # %entry
 ; NO-SINK-NEXT:    csrr a1, vlenb
-; NO-SINK-NEXT:    srli a2, a1, 2
-; NO-SINK-NEXT:    li a3, 1024
-; NO-SINK-NEXT:    bgeu a3, a2, .LBB4_2
+; NO-SINK-NEXT:    srli a1, a1, 2
+; NO-SINK-NEXT:    li a2, 1024
+; NO-SINK-NEXT:    bgeu a2, a1, .LBB4_2
 ; NO-SINK-NEXT:  # %bb.1:
-; NO-SINK-NEXT:    li a3, 0
+; NO-SINK-NEXT:    li a2, 0
 ; NO-SINK-NEXT:    j .LBB4_5
 ; NO-SINK-NEXT:  .LBB4_2: # %vector.ph
-; NO-SINK-NEXT:    addi a3, a2, -1
-; NO-SINK-NEXT:    andi a4, a3, 1024
-; NO-SINK-NEXT:    xori a3, a4, 1024
+; NO-SINK-NEXT:    li a4, 0
+; NO-SINK-NEXT:    addi a2, a1, -1
+; NO-SINK-NEXT:    andi a3, a2, 1024
+; NO-SINK-NEXT:    xori a2, a3, 1024
 ; NO-SINK-NEXT:    vsetvli a5, zero, e32, m1, ta, ma
 ; NO-SINK-NEXT:    vfmv.v.f v8, fa0
-; NO-SINK-NEXT:    mv a5, a0
-; NO-SINK-NEXT:    mv a6, a3
 ; NO-SINK-NEXT:  .LBB4_3: # %vector.body
 ; NO-SINK-NEXT:    # =>This Inner Loop Header: Depth=1
+; NO-SINK-NEXT:    slli a5, a4, 2
+; NO-SINK-NEXT:    add a5, a0, a5
 ; NO-SINK-NEXT:    vl1re32.v v9, (a5)
 ; NO-SINK-NEXT:    vfadd.vv v9, v9, v8
+; NO-SINK-NEXT:    add a4, a4, a1
 ; NO-SINK-NEXT:    vs1r.v v9, (a5)
-; NO-SINK-NEXT:    sub a6, a6, a2
-; NO-SINK-NEXT:    add a5, a5, a1
-; NO-SINK-NEXT:    bnez a6, .LBB4_3
+; NO-SINK-NEXT:    bne a4, a2, .LBB4_3
 ; NO-SINK-NEXT:  # %bb.4: # %middle.block
-; NO-SINK-NEXT:    beqz a4, .LBB4_7
+; NO-SINK-NEXT:    beqz a3, .LBB4_7
 ; NO-SINK-NEXT:  .LBB4_5: # %for.body.preheader
-; NO-SINK-NEXT:    slli a1, a3, 2
+; NO-SINK-NEXT:    slli a1, a2, 2
 ; NO-SINK-NEXT:    add a1, a0, a1
 ; NO-SINK-NEXT:    lui a2, 1
 ; NO-SINK-NEXT:    add a0, a0, a2
@@ -449,31 +446,31 @@ define void @sink_splat_fadd_scalable(ptr nocapture %a, float %x) {
 ; SINK-LABEL: sink_splat_fadd_scalable:
 ; SINK:       # %bb.0: # %entry
 ; SINK-NEXT:    csrr a1, vlenb
-; SINK-NEXT:    srli a2, a1, 2
-; SINK-NEXT:    li a3, 1024
-; SINK-NEXT:    bgeu a3, a2, .LBB4_2
+; SINK-NEXT:    srli a1, a1, 2
+; SINK-NEXT:    li a2, 1024
+; SINK-NEXT:    bgeu a2, a1, .LBB4_2
 ; SINK-NEXT:  # %bb.1:
-; SINK-NEXT:    li a3, 0
+; SINK-NEXT:    li a2, 0
 ; SINK-NEXT:    j .LBB4_5
 ; SINK-NEXT:  .LBB4_2: # %vector.ph
-; SINK-NEXT:    addi a3, a2, -1
-; SINK-NEXT:    andi a4, a3, 1024
-; SINK-NEXT:    xori a3, a4, 1024
+; SINK-NEXT:    li a4, 0
+; SINK-NEXT:    addi a2, a1, -1
+; SINK-NEXT:    andi a3, a2, 1024
+; SINK-NEXT:    xori a2, a3, 1024
 ; SINK-NEXT:    vsetvli a5, zero, e32, m1, ta, ma
-; SINK-NEXT:    mv a5, a0
-; SINK-NEXT:    mv a6, a3
 ; SINK-NEXT:  .LBB4_3: # %vector.body
 ; SINK-NEXT:    # =>This Inner Loop Header: Depth=1
+; SINK-NEXT:    slli a5, a4, 2
+; SINK-NEXT:    add a5, a0, a5
 ; SINK-NEXT:    vl1re32.v v8, (a5)
 ; SINK-NEXT:    vfadd.vf v8, v8, fa0
+; SINK-NEXT:    add a4, a4, a1
 ; SINK-NEXT:    vs1r.v v8, (a5)
-; SINK-NEXT:    sub a6, a6, a2
-; SINK-NEXT:    add a5, a5, a1
-; SINK-NEXT:    bnez a6, .LBB4_3
+; SINK-NEXT:    bne a4, a2, .LBB4_3
 ; SINK-NEXT:  # %bb.4: # %middle.block
-; SINK-NEXT:    beqz a4, .LBB4_7
+; SINK-NEXT:    beqz a3, .LBB4_7
 ; SINK-NEXT:  .LBB4_5: # %for.body.preheader
-; SINK-NEXT:    slli a1, a3, 2
+; SINK-NEXT:    slli a1, a2, 2
 ; SINK-NEXT:    add a1, a0, a1
 ; SINK-NEXT:    lui a2, 1
 ; SINK-NEXT:    add a0, a0, a2
@@ -490,31 +487,31 @@ define void @sink_splat_fadd_scalable(ptr nocapture %a, float %x) {
 ; DEFAULT-LABEL: sink_splat_fadd_scalable:
 ; DEFAULT:       # %bb.0: # %entry
 ; DEFAULT-NEXT:    csrr a1, vlenb
-; DEFAULT-NEXT:    srli a2, a1, 2
-; DEFAULT-NEXT:    li a3, 1024
-; DEFAULT-NEXT:    bgeu a3, a2, .LBB4_2
+; DEFAULT-NEXT:    srli a1, a1, 2
+; DEFAULT-NEXT:    li a2, 1024
+; DEFAULT-NEXT:    bgeu a2, a1, .LBB4_2
 ; DEFAULT-NEXT:  # %bb.1:
-; DEFAULT-NEXT:    li a3, 0
+; DEFAULT-NEXT:    li a2, 0
 ; DEFAULT-NEXT:    j .LBB4_5
 ; DEFAULT-NEXT:  .LBB4_2: # %vector.ph
-; DEFAULT-NEXT:    addi a3, a2, -1
-; DEFAULT-NEXT:    andi a4, a3, 1024
-; DEFAULT-NEXT:    xori a3, a4, 1024
+; DEFAULT-NEXT:    li a4, 0
+; DEFAULT-NEXT:    addi a2, a1, -1
+; DEFAULT-NEXT:    andi a3, a2, 1024
+; DEFAULT-NEXT:    xori a2, a3, 1024
 ; DEFAULT-NEXT:    vsetvli a5, zero, e32, m1, ta, ma
-; DEFAULT-NEXT:    mv a5, a0
-; DEFAULT-NEXT:    mv a6, a3
 ; DEFAULT-NEXT:  .LBB4_3: # %vector.body
 ; DEFAULT-NEXT:    # =>This Inner Loop Header: Depth=1
+; DEFAULT-NEXT:    slli a5, a4, 2
+; DEFAULT-NEXT:    add a5, a0, a5
 ; DEFAULT-NEXT:    vl1re32.v v8, (a5)
 ; DEFAULT-NEXT:    vfadd.vf v8, v8, fa0
+; DEFAULT-NEXT:    add a4, a4, a1
 ; DEFAULT-NEXT:    vs1r.v v8, (a5)
-; DEFAULT-NEXT:    sub a6, a6, a2
-; DEFAULT-NEXT:    add a5, a5, a1
-; DEFAULT-NEXT:    bnez a6, .LBB4_3
+; DEFAULT-NEXT:    bne a4, a2, .LBB4_3
 ; DEFAULT-NEXT:  # %bb.4: # %middle.block
-; DEFAULT-NEXT:    beqz a4, .LBB4_7
+; DEFAULT-NEXT:    beqz a3, .LBB4_7
 ; DEFAULT-NEXT:  .LBB4_5: # %for.body.preheader
-; DEFAULT-NEXT:    slli a1, a3, 2
+; DEFAULT-NEXT:    slli a1, a2, 2
 ; DEFAULT-NEXT:    add a1, a0, a1
 ; DEFAULT-NEXT:    lui a2, 1
 ; DEFAULT-NEXT:    add a0, a0, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
index 9046c861c3367a..94b5996b768f80 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
@@ -243,29 +243,28 @@ for.cond.cleanup:                                 ; preds = %vector.body
 define void @sink_splat_mul_scalable(ptr nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_mul_scalable:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    csrr a5, vlenb
-; CHECK-NEXT:    srli a2, a5, 1
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a2, a2, 1
 ; CHECK-NEXT:    li a3, 1024
 ; CHECK-NEXT:    bgeu a3, a2, .LBB7_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    li a3, 0
 ; CHECK-NEXT:    j .LBB7_5
 ; CHECK-NEXT:  .LBB7_2: # %vector.ph
+; CHECK-NEXT:    li a5, 0
 ; CHECK-NEXT:    addi a3, a2, -1
 ; CHECK-NEXT:    andi a4, a3, 1024
 ; CHECK-NEXT:    xori a3, a4, 1024
-; CHECK-NEXT:    slli a5, a5, 1
 ; CHECK-NEXT:    vsetvli a6, zero, e32, m2, ta, ma
-; CHECK-NEXT:    mv a6, a0
-; CHECK-NEXT:    mv a7, a3
 ; CHECK-NEXT:  .LBB7_3: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    slli a6, a5, 2
+; CHECK-NEXT:    add a6, a0, a6
 ; CHECK-NEXT:    vl2re32.v v8, (a6)
 ; CHECK-NEXT:    vmul.vx v8, v8, a1
+; CHECK-NEXT:    add a5, a5, a2
 ; CHECK-NEXT:    vs2r.v v8, (a6)
-; CHECK-NEXT:    sub a7, a7, a2
-; CHECK-NEXT:    add a6, a6, a5
-; CHECK-NEXT:    bnez a7, .LBB7_3
+; CHECK-NEXT:    bne a5, a3, .LBB7_3
 ; CHECK-NEXT:  # %bb.4: # %middle.block
 ; CHECK-NEXT:    beqz a4, .LBB7_7
 ; CHECK-NEXT:  .LBB7_5: # %for.body.preheader
@@ -334,29 +333,28 @@ for.body:                                         ; preds = %for.body.preheader,
 define void @sink_splat_add_scalable(ptr nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_add_scalable:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    csrr a5, vlenb
-; CHECK-NEXT:    srli a2, a5, 1
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a2, a2, 1
 ; CHECK-NEXT:    li a3, 1024
 ; CHECK-NEXT:    bgeu a3, a2, .LBB8_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    li a3, 0
 ; CHECK-NEXT:    j .LBB8_5
 ; CHECK-NEXT:  .LBB8_2: # %vector.ph
+; CHECK-NEXT:    li a5, 0
 ; CHECK-NEXT:    addi a3, a2, -1
 ; CHECK-NEXT:    andi a4, a3, 1024
 ; CHECK-NEXT:    xori a3, a4, 1024
-; CHECK-NEXT:    slli a5, a5, 1
 ; CHECK-NEXT:    vsetvli a6, zero, e32, m2, ta, ma
-; CHECK-NEXT:    mv a6, a0
-; CHECK-NEXT:    mv a7, a3
 ; CHECK-NEXT:  .LBB8_3: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    slli a6, a5, 2
+; CHECK-NEXT:    add a6, a0, a6
 ; CHECK-NEXT:    vl2re32.v v8, (a6)
 ; CHECK-NEXT:    vadd.vx v8, v8, a1
+; CHECK-NEXT:    add a5, a5, a2
 ; CHECK-NEXT:    vs2r.v v8, (a6)
-; CHECK-NEXT:    sub a7, a7, a2
-; CHECK-NEXT:    add a6, a6, a5
-; CHECK-NEXT:    bnez a7, .LBB8_3
+; CHECK-NEXT:    bne a5, a3, .LBB8_3
 ; CHECK-NEXT:  # %bb.4: # %middle.block
 ; CHECK-NEXT:    beqz a4, .LBB8_7
 ; CHECK-NEXT:  .LBB8_5: # %for.body.preheader
@@ -425,29 +423,28 @@ for.body:                                         ; preds = %for.body.preheader,
 define void @sink_splat_sub_scalable(ptr nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_sub_scalable:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    csrr a5, vlenb
-; CHECK-NEXT:    srli a2, a5, 1
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a2, a2, 1
 ; CHECK-NEXT:    li a3, 1024
 ; CHECK-NEXT:    bgeu a3, a2, .LBB9_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    li a3, 0
 ; CHECK-NEXT:    j .LBB9_5
 ; CHECK-NEXT:  .LBB9_2: # %vector.ph
+; CHECK-NEXT:    li a5, 0
 ; CHECK-NEXT:    addi a3, a2, -1
 ; CHECK-NEXT:    andi a4, a3, 1024
 ; CHECK-NEXT:    xori a3, a4, 1024
-; CHECK-NEXT:    slli a5, a5, 1
 ; CHECK-NEXT:    vsetvli a6, zero, e32, m2, ta, ma
-; CHECK-NEXT:    mv a6, a0
-; CHECK-NEXT:    mv a7, a3
 ; CHECK-NEXT:  .LBB9_3: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    slli a6, a5, 2
+; CHECK-NEXT:    add a6, a0, a6
 ; CHECK-NEXT:    vl2re32.v v8, (a6)
 ; CHECK-NEXT:    vsub.vx v8, v8, a1
+; CHECK-NEXT:    add a5, a5, a2
 ; CHECK-NEXT:    vs2r.v v8, (a6)
-; CHECK-NEXT:    sub a7, a7, a2
-; CHECK-NEXT:    add a6, a6, a5
-; CHECK-NEXT:    bnez a7, .LBB9_3
+; CHECK-NEXT:    bne a5, a3, .LBB9_3
 ; CHECK-NEXT:  # %bb.4: # %middle.block
 ; CHECK-NEXT:    beqz a4, .LBB9_7
 ; CHECK-NEXT:  .LBB9_5: # %for.body.preheader
@@ -516,29 +513,28 @@ for.body:                                         ; preds = %for.body.preheader,
 define void @sink_splat_rsub_scalable(ptr nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_rsub_scalable:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    csrr a5, vlenb
-; CHECK-NEXT:    srli a2, a5, 1
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a2, a2, 1
 ; CHECK-NEXT:    li a3, 1024
 ; CHECK-NEXT:    bgeu a3, a2, .LBB10_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    li a3, 0
 ; CHECK-NEXT:    j .LBB10_5
 ; CHECK-NEXT:  .LBB10_2: # %vector.ph
+; CHECK-NEXT:    li a5, 0
 ; CHECK-NEXT:    addi a3, a2, -1
 ; CHECK-NEXT:    andi a4, a3, 1024
 ; CHECK-NEXT:    xori a3, a4, 1024
-; CHECK-NEXT:    slli a5, a5, 1
 ; CHECK-NEXT:    vsetvli a6, zero, e32, m2, ta, ma
-; CHECK-NEXT:    mv a6, a0
-; CHECK-NEXT:    mv a7, a3
 ; CHECK-NEXT:  .LBB10_3: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    slli a6, a5, 2
+; CHECK-NEXT:    add a6, a0, a6
 ; CHECK-NEXT:    vl2re32.v v8, (a6)
 ; CHECK-NEXT:    vrsub.vx v8, v8, a1
+; CHECK-NEXT:    add a5, a5, a2
 ; CHECK-NEXT:    vs2r.v v8, (a6)
-; CHECK-NEXT:    sub a7, a7, a2
-; CHECK-NEXT:    add a6, a6, a5
-; CHECK-NEXT:    bnez a7, .LBB10_3
+; CHECK-NEXT:    bne a5, a3, .LBB10_3
 ; CHECK-NEXT:  # %bb.4: # %middle.block
 ; CHECK-NEXT:    beqz a4, .LBB10_7
 ; CHECK-NEXT:  .LBB10_5: # %for.body.preheader
@@ -607,29 +603,28 @@ for.body:                                         ; preds = %for.body.preheader,
 define void @sink_splat_and_scalable(ptr nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_and_scalable:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    csrr a5, vlenb
-; CHECK-NEXT:    srli a2, a5, 1
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a2, a2, 1
 ; CHECK-NEXT:    li a3, 1024
 ; CHECK-NEXT:    bgeu a3, a2, .LBB11_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    li a3, 0
 ; CHECK-NEXT:    j .LBB11_5
 ; CHECK-NEXT:  .LBB11_2: # %vector.ph
+; CHECK-NEXT:    li a5, 0
 ; CHECK-NEXT:    addi a3, a2, -1
 ; CHECK-NEXT:    andi a4, a3, 1024
 ; CHECK-NEXT:    xori a3, a4, 1024
-; CHECK-NEXT:    slli a5, a5, 1
 ; CHECK-NEXT:    vsetvli a6, zero, e32, m2, ta, ma
-; CHECK-NEXT:    mv a6, a0
-; CHECK-NEXT:    mv a7, a3
 ; CHECK-NEXT:  .LBB11_3: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    slli a6, a5, 2
+; CHECK-NEXT:    add a6, a0, a6
 ; CHECK-NEXT:    vl2re32.v v8, (a6)
 ; CHECK-NEXT:    vand.vx v8, v8, a1
+; CHECK-NEXT:    add a5, a5, a2
 ; CHECK-NEXT:    vs2r.v v8, (a6)
-; CHECK-NEXT:    sub a7, a7, a2
-; CHECK-NEXT:    add a6, a6, a5
-; CHECK-NEXT:    bnez a7, .LBB11_3
+; CHECK-NEXT:    bne a5, a3, .LBB11_3
 ; CHECK-NEXT:  # %bb.4: # %middle.block
 ; CHECK-NEXT:    beqz a4, .LBB11_7
 ; CHECK-NEXT:  .LBB11_5: # %for.body.preheader
@@ -698,29 +693,28 @@ for.body:                                         ; preds = %for.body.preheader,
 define void @sink_splat_or_scalable(ptr nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_or_scalable:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    csrr a5, vlenb
-; CHECK-NEXT:    srli a2, a5, 1
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a2, a2, 1
 ; CHECK-NEXT:    li a3, 1024
 ; CHECK-NEXT:    bgeu a3, a2, .LBB12_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    li a3, 0
 ; CHECK-NEXT:    j .LBB12_5
 ; CHECK-NEXT:  .LBB12_2: # %vector.ph
+; CHECK-NEXT:    li a5, 0
 ; CHECK-NEXT:    addi a3, a2, -1
 ; CHECK-NEXT:    andi a4, a3, 1024
 ; CHECK-NEXT:    xori a3, a4, 1024
-; CHECK-NEXT:    slli a5, a5, 1
 ; CHECK-NEXT:    vsetvli a6, zero, e32, m2, ta, ma
-; CHECK-NEXT:    mv a6, a0
-; CHECK-NEXT:    mv a7, a3
 ; CHECK-NEXT:  .LBB12_3: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    slli a6, a5, 2
+; CHECK-NEXT:    add a6, a0, a6
 ; CHECK-NEXT:    vl2re32.v v8, (a6)
 ; CHECK-NEXT:    vor.vx v8, v8, a1
+; CHECK-NEXT:    add a5, a5, a2
 ; CHECK-NEXT:    vs2r.v v8, (a6)
-; CHECK-NEXT:    sub a7, a7, a2
-; CHECK-NEXT:    add a6, a6, a5
-; CHECK-NEXT:    bnez a7, .LBB12_3
+; CHECK-NEXT:    bne a5, a3, .LBB12_3
 ; CHECK-NEXT:  # %bb.4: # %middle.block
 ; CHECK-NEXT:    beqz a4, .LBB12_7
 ; CHECK-NEXT:  .LBB12_5: # %for.body.preheader
@@ -789,29 +783,28 @@ for.body:                                         ; preds = %for.body.preheader,
 define void @sink_splat_xor_scalable(ptr nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_xor_scalable:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    csrr a5, vlenb
-; CHECK-NEXT:    srli a2, a5, 1
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a2, a2, 1
 ; CHECK-NEXT:    li a3, 1024
 ; CHECK-NEXT:    bgeu a3, a2, .LBB13_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    li a3, 0
 ; CHECK-NEXT:    j .LBB13_5
 ; CHECK-NEXT:  .LBB13_2: # %vector.ph
+; CHECK-NEXT:    li a5, 0
 ; CHECK-NEXT:    addi a3, a2, -1
 ; CHECK-NEXT:    andi a4, a3, 1024
 ; CHECK-NEXT:    xori a3, a4, 1024
-; CHECK-NEXT:    slli a5, a5, 1
 ; CHECK-NEXT:    vsetvli a6, zero, e32, m2, ta, ma
-; CHECK-NEXT:    mv a6, a0
-; CHECK-NEXT:    mv a7, a3
 ; CHECK-NEXT:  .LBB13_3: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    slli a6, a5, 2
+; CHECK-NEXT:    add a6, a0, a6
 ; CHECK-NEXT:    vl2re32.v v8, (a6)
 ; CHECK-NEXT:    vxor.vx v8, v8, a1
+; CHECK-NEXT:    add a5, a5, a2
 ; CHECK-NEXT:    vs2r.v v8, (a6)
-; CHECK-NEXT:    sub a7, a7, a2
-; CHECK-NEXT:    add a6, a6, a5
-; CHECK-NEXT:    bnez a7, .LBB13_3
+; CHECK-NEXT:    bne a5, a3, .LBB13_3
 ; CHECK-NEXT:  # %bb.4: # %middle.block
 ; CHECK-NEXT:    beqz a4, .LBB13_7
 ; CHECK-NEXT:  .LBB13_5: # %for.body.preheader
@@ -982,29 +975,28 @@ for.cond.cleanup:                                 ; preds = %vector.body
 define void @sink_splat_shl_scalable(ptr nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_shl_scalable:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    csrr a5, vlenb
-; CHECK-NEXT:    srli a2, a5, 1
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a2, a2, 1
 ; CHECK-NEXT:    li a3, 1024
 ; CHECK-NEXT:    bgeu a3, a2, .LBB17_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    li a3, 0
 ; CHECK-NEXT:    j .LBB17_5
 ; CHECK-NEXT:  .LBB17_2: # %vector.ph
+; CHECK-NEXT:    li a5, 0
 ; CHECK-NEXT:    addi a3, a2, -1
 ; CHECK-NEXT:    andi a4, a3, 1024
 ; CHECK-NEXT:    xori a3, a4, 1024
-; CHECK-NEXT:    slli a5, a5, 1
 ; CHECK-NEXT:    vsetvli a6, zero, e32, m2, ta, ma
-; CHECK-NEXT:    mv a6, a0
-; CHECK-NEXT:    mv a7, a3
 ; CHECK-NEXT:  .LBB17_3: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    slli a6, a5, 2
+; CHECK-NEXT:    add a6, a0, a6
 ; CHECK-NEXT:    vl2re32.v v8, (a6)
 ; CHECK-NEXT:    vsll.vx v8, v8, a1
+; CHECK-NEXT:    add a5, a5, a2
 ; CHECK-NEXT:    vs2r.v v8, (a6)
-; CHECK-NEXT:    sub a7, a7, a2
-; CHECK-NEXT:    add a6, a6, a5
-; CHECK-NEXT:    bnez a7, .LBB17_3
+; CHECK-NEXT:    bne a5, a3, .LBB17_3
 ; CHECK-NEXT:  # %bb.4: # %middle.block
 ; CHECK-NEXT:    beqz a4, .LBB17_7
 ; CHECK-NEXT:  .LBB17_5: # %for.body.preheader
@@ -1073,29 +1065,28 @@ for.body:                                         ; preds = %for.body.preheader,
 define void @sink_splat_lshr_scalable(ptr nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_lshr_scalable:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    csrr a5, vlenb
-; CHECK-NEXT:    srli a2, a5, 1
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a2, a2, 1
 ; CHECK-NEXT:    li a3, 1024
 ; CHECK-NEXT:    bgeu a3, a2, .LBB18_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    li a3, 0
 ; CHECK-NEXT:    j .LBB18_5
 ; CHECK-NEXT:  .LBB18_2: # %vector.ph
+; CHECK-NEXT:    li a5, 0
 ; CHECK-NEXT:    addi a3, a2, -1
 ; CHECK-NEXT:    andi a4, a3, 1024
 ; CHECK-NEXT:    xori a3, a4, 1024
-; CHECK-NEXT:    slli a5, a5, 1
 ; CHECK-NEXT:    vsetvli a6, zero, e32, m2, ta, ma
-; CHECK-NEXT:    mv a6, a0
-; CHECK-NEXT:    mv a7, a3
 ; CHECK-NEXT:  .LBB18_3: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    slli a6, a5, 2
+; CHECK-NEXT:    add a6, a0, a6
 ; CHECK-NEXT:    vl2re32.v v8, (a6)
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
+; CHECK-NEXT:    add a5, a5, a2
 ; CHECK-NEXT:    vs2r.v v8, (a6)
-; CHECK-NEXT:    sub a7, a7, a2
-; CHECK-NEXT:    add a6, a6, a5
-; CHECK-NEXT:    bnez a7, .LBB18_3
+; CHECK-NEXT:    bne a5, a3, .LBB18_3
 ; CHECK-NEXT:  # %bb.4: # %middle.block
 ; CHECK-NEXT:    beqz a4, .LBB18_7
 ; CHECK-NEXT:  .LBB18_5: # %for.body.preheader
@@ -1164,33 +1155,32 @@ for.body:                                         ; preds = %for.body.preheader,
 define void @sink_splat_ashr_scalable(ptr nocapture %a) {
 ; CHECK-LABEL: sink_splat_ashr_scalable:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    csrr a4, vlenb
-; CHECK-NEXT:    srli a2, a4, 1
-; CHECK-NEXT:    li a1, 1024
-; CHECK-NEXT:    bgeu a1, a2, .LBB19_2
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    srli a1, a1, 1
+; CHECK-NEXT:    li a2, 1024
+; CHECK-NEXT:    bgeu a2, a1, .LBB19_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    li a1, 0
+; CHECK-NEXT:    li a2, 0
 ; CHECK-NEXT:    j .LBB19_5
 ; CHECK-NEXT:  .LBB19_2: # %vector.ph
-; CHECK-NEXT:    addi a1, a2, -1
-; CHECK-NEXT:    andi a3, a1, 1024
-; CHECK-NEXT:    xori a1, a3, 1024
-; CHECK-NEXT:    slli a4, a4, 1
+; CHECK-NEXT:    li a4, 0
+; CHECK-NEXT:    addi a2, a1, -1
+; CHECK-NEXT:    andi a3, a2, 1024
+; CHECK-NEXT:    xori a2, a3, 1024
 ; CHECK-NEXT:    vsetvli a5, zero, e32, m2, ta, ma
-; CHECK-NEXT:    mv a5, a0
-; CHECK-NEXT:    mv a6, a1
 ; CHECK-NEXT:  .LBB19_3: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    slli a5, a4, 2
+; CHECK-NEXT:    add a5, a0, a5
 ; CHECK-NEXT:    vl2re32.v v8, (a5)
 ; CHECK-NEXT:    vsra.vi v8, v8, 2
+; CHECK-NEXT:    add a4, a4, a1
 ; CHECK-NEXT:    vs2r.v v8, (a5)
-; CHECK-NEXT:    sub a6, a6, a2
-; CHECK-NEXT:    add a5, a5, a4
-; CHECK-NEXT:    bnez a6, .LBB19_3
+; CHECK-NEXT:    bne a4, a2, .LBB19_3
 ; CHECK-NEXT:  # %bb.4: # %middle.block
 ; CHECK-NEXT:    beqz a3, .LBB19_7
 ; CHECK-NEXT:  .LBB19_5: # %for.body.preheader
-; CHECK-NEXT:    slli a1, a1, 2
+; CHECK-NEXT:    slli a1, a2, 2
 ; CHECK-NEXT:    add a1, a0, a1
 ; CHECK-NEXT:    lui a2, 1
 ; CHECK-NEXT:    add a0, a0, a2
@@ -1458,31 +1448,31 @@ define void @sink_splat_fmul_scalable(ptr nocapture %a, float %x) {
 ; CHECK-LABEL: sink_splat_fmul_scalable:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    srli a2, a1, 2
-; CHECK-NEXT:    li a3, 1024
-; CHECK-NEXT:    bgeu a3, a2, .LBB26_2
+; CHECK-NEXT:    srli a1, a1, 2
+; CHECK-NEXT:    li a2, 1024
+; CHECK-NEXT:    bgeu a2, a1, .LBB26_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    li a3, 0
+; CHECK-NEXT:    li a2, 0
 ; CHECK-NEXT:    j .LBB26_5
 ; CHECK-NEXT:  .LBB26_2: # %vector.ph
-; CHECK-NEXT:    addi a3, a2, -1
-; CHECK-NEXT:    andi a4, a3, 1024
-; CHECK-NEXT:    xori a3, a4, 1024
+; CHECK-NEXT:    li a4, 0
+; CHECK-NEXT:    addi a2, a1, -1
+; CHECK-NEXT:    andi a3, a2, 1024
+; CHECK-NEXT:    xori a2, a3, 1024
 ; CHECK-NEXT:    vsetvli a5, zero, e32, m1, ta, ma
-; CHECK-NEXT:    mv a5, a0
-; CHECK-NEXT:    mv a6, a3
 ; CHECK-NEXT:  .LBB26_3: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    slli a5, a4, 2
+; CHECK-NEXT:    add a5, a0, a5
 ; CHECK-NEXT:    vl1re32.v v8, (a5)
 ; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    add a4, a4, a1
 ; CHECK-NEXT:    vs1r.v v8, (a5)
-; CHECK-NEXT:    sub a6, a6, a2
-; CHECK-NEXT:    add a5, a5, a1
-; CHECK-NEXT:    bnez a6, .LBB26_3
+; CHECK-NEXT:    bne a4, a2, .LBB26_3
 ; CHECK-NEXT:  # %bb.4: # %middle.block
-; CHECK-NEXT:    beqz a4, .LBB26_7
+; CHECK-NEXT:    beqz a3, .LBB26_7
 ; CHECK-NEXT:  .LBB26_5: # %for.body.preheader
-; CHECK-NEXT:    slli a1, a3, 2
+; CHECK-NEXT:    slli a1, a2, 2
 ; CHECK-NEXT:    add a1, a0, a1
 ; CHECK-NEXT:    lui a2, 1
 ; CHECK-NEXT:    add a0, a0, a2
@@ -1548,31 +1538,31 @@ define void @sink_splat_fdiv_scalable(ptr nocapture %a, float %x) {
 ; CHECK-LABEL: sink_splat_fdiv_scalable:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    srli a2, a1, 2
-; CHECK-NEXT:    li a3, 1024
-; CHECK-NEXT:    bgeu a3, a2, .LBB27_2
+; CHECK-NEXT:    srli a1, a1, 2
+; CHECK-NEXT:    li a2, 1024
+; CHECK-NEXT:    bgeu a2, a1, .LBB27_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    li a3, 0
+; CHECK-NEXT:    li a2, 0
 ; CHECK-NEXT:    j .LBB27_5
 ; CHECK-NEXT:  .LBB27_2: # %vector.ph
-; CHECK-NEXT:    addi a3, a2, -1
-; CHECK-NEXT:    andi a4, a3, 1024
-; CHECK-NEXT:    xori a3, a4, 1024
+; CHECK-NEXT:    li a4, 0
+; CHECK-NEXT:    addi a2, a1, -1
+; CHECK-NEXT:    andi a3, a2, 1024
+; CHECK-NEXT:    xori a2, a3, 1024
 ; CHECK-NEXT:    vsetvli a5, zero, e32, m1, ta, ma
-; CHECK-NEXT:    mv a5, a0
-; CHECK-NEXT:    mv a6, a3
 ; CHECK-NEXT:  .LBB27_3: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    slli a5, a4, 2
+; CHECK-NEXT:    add a5, a0, a5
 ; CHECK-NEXT:    vl1re32.v v8, (a5)
 ; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
+; CHECK-NEXT:    add a4, a4, a1
 ; CHECK-NEXT:    vs1r.v v8, (a5)
-; CHECK-NEXT:    sub a6, a6, a2
-; CHECK-NEXT:    add a5, a5, a1
-; CHECK-NEXT:    bnez a6, .LBB27_3
+; CHECK-NEXT:    bne a4, a2, .LBB27_3
 ; CHECK-NEXT:  # %bb.4: # %middle.block
-; CHECK-NEXT:    beqz a4, .LBB27_7
+; CHECK-NEXT:    beqz a3, .LBB27_7
 ; CHECK-NEXT:  .LBB27_5: # %for.body.preheader
-; CHECK-NEXT:    slli a1, a3, 2
+; CHECK-NEXT:    slli a1, a2, 2
 ; CHECK-NEXT:    add a1, a0, a1
 ; CHECK-NEXT:    lui a2, 1
 ; CHECK-NEXT:    add a0, a0, a2
@@ -1638,31 +1628,31 @@ define void @sink_splat_frdiv_scalable(ptr nocapture %a, float %x) {
 ; CHECK-LABEL: sink_splat_frdiv_scalable:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    srli a2, a1, 2
-; CHECK-NEXT:    li a3, 1024
-; CHECK-NEXT:    bgeu a3, a2, .LBB28_2
+; CHECK-NEXT:    srli a1, a1, 2
+; CHECK-NEXT:    li a2, 1024
+; CHECK-NEXT:    bgeu a2, a1, .LBB28_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    li a3, 0
+; CHECK-NEXT:    li a2, 0
 ; CHECK-NEXT:    j .LBB28_5
 ; CHECK-NEXT:  .LBB28_2: # %vector.ph
-; CHECK-NEXT:    addi a3, a2, -1
-; CHECK-NEXT:    andi a4, a3, 1024
-; CHECK-NEXT:    xori a3, a4, 1024
+; CHECK-NEXT:    li a4, 0
+; CHECK-NEXT:    addi a2, a1, -1
+; CHECK-NEXT:    andi a3, a2, 1024
+; CHECK-NEXT:    xori a2, a3, 1024
 ; CHECK-NEXT:    vsetvli a5, zero, e32, m1, ta, ma
-; CHECK-NEXT:    mv a5, a0
-; CHECK-NEXT:    mv a6, a3
 ; CHECK-NEXT:  .LBB28_3: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    slli a5, a4, 2
+; CHECK-NEXT:    add a5, a0, a5
 ; CHECK-NEXT:    vl1re32.v v8, (a5)
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
+; CHECK-NEXT:    add a4, a4, a1
 ; CHECK-NEXT:    vs1r.v v8, (a5)
-; CHECK-NEXT:    sub a6, a6, a2
-; CHECK-NEXT:    add a5, a5, a1
-; CHECK-NEXT:    bnez a6, .LBB28_3
+; CHECK-NEXT:    bne a4, a2, .LBB28_3
 ; CHECK-NEXT:  # %bb.4: # %middle.block
-; CHECK-NEXT:    beqz a4, .LBB28_7
+; CHECK-NEXT:    beqz a3, .LBB28_7
 ; CHECK-NEXT:  .LBB28_5: # %for.body.preheader
-; CHECK-NEXT:    slli a1, a3, 2
+; CHECK-NEXT:    slli a1, a2, 2
 ; CHECK-NEXT:    add a1, a0, a1
 ; CHECK-NEXT:    lui a2, 1
 ; CHECK-NEXT:    add a0, a0, a2
@@ -1728,31 +1718,31 @@ define void @sink_splat_fadd_scalable(ptr nocapture %a, float %x) {
 ; CHECK-LABEL: sink_splat_fadd_scalable:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    srli a2, a1, 2
-; CHECK-NEXT:    li a3, 1024
-; CHECK-NEXT:    bgeu a3, a2, .LBB29_2
+; CHECK-NEXT:    srli a1, a1, 2
+; CHECK-NEXT:    li a2, 1024
+; CHECK-NEXT:    bgeu a2, a1, .LBB29_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    li a3, 0
+; CHECK-NEXT:    li a2, 0
 ; CHECK-NEXT:    j .LBB29_5
 ; CHECK-NEXT:  .LBB29_2: # %vector.ph
-; CHECK-NEXT:    addi a3, a2, -1
-; CHECK-NEXT:    andi a4, a3, 1024
-; CHECK-NEXT:    xori a3, a4, 1024
+; CHECK-NEXT:    li a4, 0
+; CHECK-NEXT:    addi a2, a1, -1
+; CHECK-NEXT:    andi a3, a2, 1024
+; CHECK-NEXT:    xori a2, a3, 1024
 ; CHECK-NEXT:    vsetvli a5, zero, e32, m1, ta, ma
-; CHECK-NEXT:    mv a5, a0
-; CHECK-NEXT:    mv a6, a3
 ; CHECK-NEXT:  .LBB29_3: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    slli a5, a4, 2
+; CHECK-NEXT:    add a5, a0, a5
 ; CHECK-NEXT:    vl1re32.v v8, (a5)
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    add a4, a4, a1
 ; CHECK-NEXT:    vs1r.v v8, (a5)
-; CHECK-NEXT:    sub a6, a6, a2
-; CHECK-NEXT:    add a5, a5, a1
-; CHECK-NEXT:    bnez a6, .LBB29_3
+; CHECK-NEXT:    bne a4, a2, .LBB29_3
 ; CHECK-NEXT:  # %bb.4: # %middle.block
-; CHECK-NEXT:    beqz a4, .LBB29_7
+; CHECK-NEXT:    beqz a3, .LBB29_7
 ; CHECK-NEXT:  .LBB29_5: # %for.body.preheader
-; CHECK-NEXT:    slli a1, a3, 2
+; CHECK-NEXT:    slli a1, a2, 2
 ; CHECK-NEXT:    add a1, a0, a1
 ; CHECK-NEXT:    lui a2, 1
 ; CHECK-NEXT:    add a0, a0, a2
@@ -1818,31 +1808,31 @@ define void @sink_splat_fsub_scalable(ptr nocapture %a, float %x) {
 ; CHECK-LABEL: sink_splat_fsub_scalable:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    srli a2, a1, 2
-; CHECK-NEXT:    li a3, 1024
-; CHECK-NEXT:    bgeu a3, a2, .LBB30_2
+; CHECK-NEXT:    srli a1, a1, 2
+; CHECK-NEXT:    li a2, 1024
+; CHECK-NEXT:    bgeu a2, a1, .LBB30_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    li a3, 0
+; CHECK-NEXT:    li a2, 0
 ; CHECK-NEXT:    j .LBB30_5
 ; CHECK-NEXT:  .LBB30_2: # %vector.ph
-; CHECK-NEXT:    addi a3, a2, -1
-; CHECK-NEXT:    andi a4, a3, 1024
-; CHECK-NEXT:    xori a3, a4, 1024
+; CHECK-NEXT:    li a4, 0
+; CHECK-NEXT:    addi a2, a1, -1
+; CHECK-NEXT:    andi a3, a2, 1024
+; CHECK-NEXT:    xori a2, a3, 1024
 ; CHECK-NEXT:    vsetvli a5, zero, e32, m1, ta, ma
-; CHECK-NEXT:    mv a5, a0
-; CHECK-NEXT:    mv a6, a3
 ; CHECK-NEXT:  .LBB30_3: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    slli a5, a4, 2
+; CHECK-NEXT:    add a5, a0, a5
 ; CHECK-NEXT:    vl1re32.v v8, (a5)
 ; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    add a4, a4, a1
 ; CHECK-NEXT:    vs1r.v v8, (a5)
-; CHECK-NEXT:    sub a6, a6, a2
-; CHECK-NEXT:    add a5, a5, a1
-; CHECK-NEXT:    bnez a6, .LBB30_3
+; CHECK-NEXT:    bne a4, a2, .LBB30_3
 ; CHECK-NEXT:  # %bb.4: # %middle.block
-; CHECK-NEXT:    beqz a4, .LBB30_7
+; CHECK-NEXT:    beqz a3, .LBB30_7
 ; CHECK-NEXT:  .LBB30_5: # %for.body.preheader
-; CHECK-NEXT:    slli a1, a3, 2
+; CHECK-NEXT:    slli a1, a2, 2
 ; CHECK-NEXT:    add a1, a0, a1
 ; CHECK-NEXT:    lui a2, 1
 ; CHECK-NEXT:    add a0, a0, a2
@@ -1908,31 +1898,31 @@ define void @sink_splat_frsub_scalable(ptr nocapture %a, float %x) {
 ; CHECK-LABEL: sink_splat_frsub_scalable:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    srli a2, a1, 2
-; CHECK-NEXT:    li a3, 1024
-; CHECK-NEXT:    bgeu a3, a2, .LBB31_2
+; CHECK-NEXT:    srli a1, a1, 2
+; CHECK-NEXT:    li a2, 1024
+; CHECK-NEXT:    bgeu a2, a1, .LBB31_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    li a3, 0
+; CHECK-NEXT:    li a2, 0
 ; CHECK-NEXT:    j .LBB31_5
 ; CHECK-NEXT:  .LBB31_2: # %vector.ph
-; CHECK-NEXT:    addi a3, a2, -1
-; CHECK-NEXT:    andi a4, a3, 1024
-; CHECK-NEXT:    xori a3, a4, 1024
+; CHECK-NEXT:    li a4, 0
+; CHECK-NEXT:    addi a2, a1, -1
+; CHECK-NEXT:    andi a3, a2, 1024
+; CHECK-NEXT:    xori a2, a3, 1024
 ; CHECK-NEXT:    vsetvli a5, zero, e32, m1, ta, ma
-; CHECK-NEXT:    mv a5, a0
-; CHECK-NEXT:    mv a6, a3
 ; CHECK-NEXT:  .LBB31_3: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    slli a5, a4, 2
+; CHECK-NEXT:    add a5, a0, a5
 ; CHECK-NEXT:    vl1re32.v v8, (a5)
 ; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
+; CHECK-NEXT:    add a4, a4, a1
 ; CHECK-NEXT:    vs1r.v v8, (a5)
-; CHECK-NEXT:    sub a6, a6, a2
-; CHECK-NEXT:    add a5, a5, a1
-; CHECK-NEXT:    bnez a6, .LBB31_3
+; CHECK-NEXT:    bne a4, a2, .LBB31_3
 ; CHECK-NEXT:  # %bb.4: # %middle.block
-; CHECK-NEXT:    beqz a4, .LBB31_7
+; CHECK-NEXT:    beqz a3, .LBB31_7
 ; CHECK-NEXT:  .LBB31_5: # %for.body.preheader
-; CHECK-NEXT:    slli a1, a3, 2
+; CHECK-NEXT:    slli a1, a2, 2
 ; CHECK-NEXT:    add a1, a0, a1
 ; CHECK-NEXT:    lui a2, 1
 ; CHECK-NEXT:    add a0, a0, a2
@@ -2074,36 +2064,35 @@ define void @sink_splat_fma_scalable(ptr noalias nocapture %a, ptr noalias nocap
 ; CHECK-LABEL: sink_splat_fma_scalable:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    srli a3, a2, 2
-; CHECK-NEXT:    li a4, 1024
-; CHECK-NEXT:    bgeu a4, a3, .LBB34_2
+; CHECK-NEXT:    srli a2, a2, 2
+; CHECK-NEXT:    li a3, 1024
+; CHECK-NEXT:    bgeu a3, a2, .LBB34_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    li a4, 0
+; CHECK-NEXT:    li a3, 0
 ; CHECK-NEXT:    j .LBB34_5
 ; CHECK-NEXT:  .LBB34_2: # %vector.ph
-; CHECK-NEXT:    addi a4, a3, -1
-; CHECK-NEXT:    andi a5, a4, 1024
-; CHECK-NEXT:    xori a4, a5, 1024
+; CHECK-NEXT:    li a5, 0
+; CHECK-NEXT:    addi a3, a2, -1
+; CHECK-NEXT:    andi a4, a3, 1024
+; CHECK-NEXT:    xori a3, a4, 1024
 ; CHECK-NEXT:    vsetvli a6, zero, e32, m1, ta, ma
-; CHECK-NEXT:    mv a6, a0
-; CHECK-NEXT:    mv a7, a1
-; CHECK-NEXT:    mv t0, a4
 ; CHECK-NEXT:  .LBB34_3: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vl1re32.v v8, (a6)
-; CHECK-NEXT:    vl1re32.v v9, (a7)
+; CHECK-NEXT:    slli a6, a5, 2
+; CHECK-NEXT:    add a7, a0, a6
+; CHECK-NEXT:    vl1re32.v v8, (a7)
+; CHECK-NEXT:    add a6, a1, a6
+; CHECK-NEXT:    vl1re32.v v9, (a6)
 ; CHECK-NEXT:    vfmacc.vf v9, fa0, v8
-; CHECK-NEXT:    vs1r.v v9, (a6)
-; CHECK-NEXT:    sub t0, t0, a3
-; CHECK-NEXT:    add a7, a7, a2
-; CHECK-NEXT:    add a6, a6, a2
-; CHECK-NEXT:    bnez t0, .LBB34_3
+; CHECK-NEXT:    add a5, a5, a2
+; CHECK-NEXT:    vs1r.v v9, (a7)
+; CHECK-NEXT:    bne a5, a3, .LBB34_3
 ; CHECK-NEXT:  # %bb.4: # %middle.block
-; CHECK-NEXT:    beqz a5, .LBB34_7
+; CHECK-NEXT:    beqz a4, .LBB34_7
 ; CHECK-NEXT:  .LBB34_5: # %for.body.preheader
-; CHECK-NEXT:    slli a4, a4, 2
-; CHECK-NEXT:    add a2, a1, a4
-; CHECK-NEXT:    add a0, a0, a4
+; CHECK-NEXT:    slli a3, a3, 2
+; CHECK-NEXT:    add a2, a1, a3
+; CHECK-NEXT:    add a0, a0, a3
 ; CHECK-NEXT:    lui a3, 1
 ; CHECK-NEXT:    add a1, a1, a3
 ; CHECK-NEXT:  .LBB34_6: # %for.body
@@ -2174,36 +2163,35 @@ define void @sink_splat_fma_commute_scalable(ptr noalias nocapture %a, ptr noali
 ; CHECK-LABEL: sink_splat_fma_commute_scalable:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    srli a3, a2, 2
-; CHECK-NEXT:    li a4, 1024
-; CHECK-NEXT:    bgeu a4, a3, .LBB35_2
+; CHECK-NEXT:    srli a2, a2, 2
+; CHECK-NEXT:    li a3, 1024
+; CHECK-NEXT:    bgeu a3, a2, .LBB35_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    li a4, 0
+; CHECK-NEXT:    li a3, 0
 ; CHECK-NEXT:    j .LBB35_5
 ; CHECK-NEXT:  .LBB35_2: # %vector.ph
-; CHECK-NEXT:    addi a4, a3, -1
-; CHECK-NEXT:    andi a5, a4, 1024
-; CHECK-NEXT:    xori a4, a5, 1024
+; CHECK-NEXT:    li a5, 0
+; CHECK-NEXT:    addi a3, a2, -1
+; CHECK-NEXT:    andi a4, a3, 1024
+; CHECK-NEXT:    xori a3, a4, 1024
 ; CHECK-NEXT:    vsetvli a6, zero, e32, m1, ta, ma
-; CHECK-NEXT:    mv a6, a0
-; CHECK-NEXT:    mv a7, a1
-; CHECK-NEXT:    mv t0, a4
 ; CHECK-NEXT:  .LBB35_3: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vl1re32.v v8, (a6)
-; CHECK-NEXT:    vl1re32.v v9, (a7)
+; CHECK-NEXT:    slli a6, a5, 2
+; CHECK-NEXT:    add a7, a0, a6
+; CHECK-NEXT:    vl1re32.v v8, (a7)
+; CHECK-NEXT:    add a6, a1, a6
+; CHECK-NEXT:    vl1re32.v v9, (a6)
 ; CHECK-NEXT:    vfmacc.vf v9, fa0, v8
-; CHECK-NEXT:    vs1r.v v9, (a6)
-; CHECK-NEXT:    sub t0, t0, a3
-; CHECK-NEXT:    add a7, a7, a2
-; CHECK-NEXT:    add a6, a6, a2
-; CHECK-NEXT:    bnez t0, .LBB35_3
+; CHECK-NEXT:    add a5, a5, a2
+; CHECK-NEXT:    vs1r.v v9, (a7)
+; CHECK-NEXT:    bne a5, a3, .LBB35_3
 ; CHECK-NEXT:  # %bb.4: # %middle.block
-; CHECK-NEXT:    beqz a5, .LBB35_7
+; CHECK-NEXT:    beqz a4, .LBB35_7
 ; CHECK-NEXT:  .LBB35_5: # %for.body.preheader
-; CHECK-NEXT:    slli a4, a4, 2
-; CHECK-NEXT:    add a2, a1, a4
-; CHECK-NEXT:    add a0, a0, a4
+; CHECK-NEXT:    slli a3, a3, 2
+; CHECK-NEXT:    add a2, a1, a3
+; CHECK-NEXT:    add a0, a0, a3
 ; CHECK-NEXT:    lui a3, 1
 ; CHECK-NEXT:    add a1, a1, a3
 ; CHECK-NEXT:  .LBB35_6: # %for.body
@@ -2486,29 +2474,28 @@ for.cond.cleanup:                                 ; preds = %vector.body
 define void @sink_splat_udiv_scalable(ptr nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_udiv_scalable:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    csrr a5, vlenb
-; CHECK-NEXT:    srli a2, a5, 1
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a2, a2, 1
 ; CHECK-NEXT:    li a3, 1024
 ; CHECK-NEXT:    bgeu a3, a2, .LBB42_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    li a3, 0
 ; CHECK-NEXT:    j .LBB42_5
 ; CHECK-NEXT:  .LBB42_2: # %vector.ph
+; CHECK-NEXT:    li a5, 0
 ; CHECK-NEXT:    addi a3, a2, -1
 ; CHECK-NEXT:    andi a4, a3, 1024
 ; CHECK-NEXT:    xori a3, a4, 1024
-; CHECK-NEXT:    slli a5, a5, 1
 ; CHECK-NEXT:    vsetvli a6, zero, e32, m2, ta, ma
-; CHECK-NEXT:    mv a6, a0
-; CHECK-NEXT:    mv a7, a3
 ; CHECK-NEXT:  .LBB42_3: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    slli a6, a5, 2
+; CHECK-NEXT:    add a6, a0, a6
 ; CHECK-NEXT:    vl2re32.v v8, (a6)
 ; CHECK-NEXT:    vdivu.vx v8, v8, a1
+; CHECK-NEXT:    add a5, a5, a2
 ; CHECK-NEXT:    vs2r.v v8, (a6)
-; CHECK-NEXT:    sub a7, a7, a2
-; CHECK-NEXT:    add a6, a6, a5
-; CHECK-NEXT:    bnez a7, .LBB42_3
+; CHECK-NEXT:    bne a5, a3, .LBB42_3
 ; CHECK-NEXT:  # %bb.4: # %middle.block
 ; CHECK-NEXT:    beqz a4, .LBB42_7
 ; CHECK-NEXT:  .LBB42_5: # %for.body.preheader
@@ -2577,29 +2564,28 @@ for.body:                                         ; preds = %for.body.preheader,
 define void @sink_splat_sdiv_scalable(ptr nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_sdiv_scalable:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    csrr a5, vlenb
-; CHECK-NEXT:    srli a2, a5, 1
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a2, a2, 1
 ; CHECK-NEXT:    li a3, 1024
 ; CHECK-NEXT:    bgeu a3, a2, .LBB43_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    li a3, 0
 ; CHECK-NEXT:    j .LBB43_5
 ; CHECK-NEXT:  .LBB43_2: # %vector.ph
+; CHECK-NEXT:    li a5, 0
 ; CHECK-NEXT:    addi a3, a2, -1
 ; CHECK-NEXT:    andi a4, a3, 1024
 ; CHECK-NEXT:    xori a3, a4, 1024
-; CHECK-NEXT:    slli a5, a5, 1
 ; CHECK-NEXT:    vsetvli a6, zero, e32, m2, ta, ma
-; CHECK-NEXT:    mv a6, a0
-; CHECK-NEXT:    mv a7, a3
 ; CHECK-NEXT:  .LBB43_3: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    slli a6, a5, 2
+; CHECK-NEXT:    add a6, a0, a6
 ; CHECK-NEXT:    vl2re32.v v8, (a6)
 ; CHECK-NEXT:    vdiv.vx v8, v8, a1
+; CHECK-NEXT:    add a5, a5, a2
 ; CHECK-NEXT:    vs2r.v v8, (a6)
-; CHECK-NEXT:    sub a7, a7, a2
-; CHECK-NEXT:    add a6, a6, a5
-; CHECK-NEXT:    bnez a7, .LBB43_3
+; CHECK-NEXT:    bne a5, a3, .LBB43_3
 ; CHECK-NEXT:  # %bb.4: # %middle.block
 ; CHECK-NEXT:    beqz a4, .LBB43_7
 ; CHECK-NEXT:  .LBB43_5: # %for.body.preheader
@@ -2668,29 +2654,28 @@ for.body:                                         ; preds = %for.body.preheader,
 define void @sink_splat_urem_scalable(ptr nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_urem_scalable:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    csrr a5, vlenb
-; CHECK-NEXT:    srli a2, a5, 1
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a2, a2, 1
 ; CHECK-NEXT:    li a3, 1024
 ; CHECK-NEXT:    bgeu a3, a2, .LBB44_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    li a3, 0
 ; CHECK-NEXT:    j .LBB44_5
 ; CHECK-NEXT:  .LBB44_2: # %vector.ph
+; CHECK-NEXT:    li a5, 0
 ; CHECK-NEXT:    addi a3, a2, -1
 ; CHECK-NEXT:    andi a4, a3, 1024
 ; CHECK-NEXT:    xori a3, a4, 1024
-; CHECK-NEXT:    slli a5, a5, 1
 ; CHECK-NEXT:    vsetvli a6, zero, e32, m2, ta, ma
-; CHECK-NEXT:    mv a6, a0
-; CHECK-NEXT:    mv a7, a3
 ; CHECK-NEXT:  .LBB44_3: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    slli a6, a5, 2
+; CHECK-NEXT:    add a6, a0, a6
 ; CHECK-NEXT:    vl2re32.v v8, (a6)
 ; CHECK-NEXT:    vremu.vx v8, v8, a1
+; CHECK-NEXT:    add a5, a5, a2
 ; CHECK-NEXT:    vs2r.v v8, (a6)
-; CHECK-NEXT:    sub a7, a7, a2
-; CHECK-NEXT:    add a6, a6, a5
-; CHECK-NEXT:    bnez a7, .LBB44_3
+; CHECK-NEXT:    bne a5, a3, .LBB44_3
 ; CHECK-NEXT:  # %bb.4: # %middle.block
 ; CHECK-NEXT:    beqz a4, .LBB44_7
 ; CHECK-NEXT:  .LBB44_5: # %for.body.preheader
@@ -2759,29 +2744,28 @@ for.body:                                         ; preds = %for.body.preheader,
 define void @sink_splat_srem_scalable(ptr nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_srem_scalable:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    csrr a5, vlenb
-; CHECK-NEXT:    srli a2, a5, 1
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a2, a2, 1
 ; CHECK-NEXT:    li a3, 1024
 ; CHECK-NEXT:    bgeu a3, a2, .LBB45_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    li a3, 0
 ; CHECK-NEXT:    j .LBB45_5
 ; CHECK-NEXT:  .LBB45_2: # %vector.ph
+; CHECK-NEXT:    li a5, 0
 ; CHECK-NEXT:    addi a3, a2, -1
 ; CHECK-NEXT:    andi a4, a3, 1024
 ; CHECK-NEXT:    xori a3, a4, 1024
-; CHECK-NEXT:    slli a5, a5, 1
 ; CHECK-NEXT:    vsetvli a6, zero, e32, m2, ta, ma
-; CHECK-NEXT:    mv a6, a0
-; CHECK-NEXT:    mv a7, a3
 ; CHECK-NEXT:  .LBB45_3: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    slli a6, a5, 2
+; CHECK-NEXT:    add a6, a0, a6
 ; CHECK-NEXT:    vl2re32.v v8, (a6)
 ; CHECK-NEXT:    vrem.vx v8, v8, a1
+; CHECK-NEXT:    add a5, a5, a2
 ; CHECK-NEXT:    vs2r.v v8, (a6)
-; CHECK-NEXT:    sub a7, a7, a2
-; CHECK-NEXT:    add a6, a6, a5
-; CHECK-NEXT:    bnez a7, .LBB45_3
+; CHECK-NEXT:    bne a5, a3, .LBB45_3
 ; CHECK-NEXT:  # %bb.4: # %middle.block
 ; CHECK-NEXT:    beqz a4, .LBB45_7
 ; CHECK-NEXT:  .LBB45_5: # %for.body.preheader
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
index 621445fb2dc5e4..b9d39098f70ee1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -589,17 +589,17 @@ define void @vlmax(i64 %N, ptr %c, ptr %a, ptr %b) {
 ; CHECK-NEXT:    blez a0, .LBB11_3
 ; CHECK-NEXT:  # %bb.1: # %for.body.preheader
 ; CHECK-NEXT:    li a5, 0
-; CHECK-NEXT:    slli a4, a6, 3
 ; CHECK-NEXT:  .LBB11_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vle64.v v8, (a2)
-; CHECK-NEXT:    vle64.v v9, (a3)
+; CHECK-NEXT:    slli a7, a5, 3
+; CHECK-NEXT:    add a4, a2, a7
+; CHECK-NEXT:    vle64.v v8, (a4)
+; CHECK-NEXT:    add a4, a3, a7
+; CHECK-NEXT:    vle64.v v9, (a4)
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    vse64.v v8, (a1)
+; CHECK-NEXT:    add a7, a7, a1
 ; CHECK-NEXT:    add a5, a5, a6
-; CHECK-NEXT:    add a1, a1, a4
-; CHECK-NEXT:    add a3, a3, a4
-; CHECK-NEXT:    add a2, a2, a4
+; CHECK-NEXT:    vse64.v v8, (a7)
 ; CHECK-NEXT:    blt a5, a0, .LBB11_2
 ; CHECK-NEXT:  .LBB11_3: # %for.end
 ; CHECK-NEXT:    ret
@@ -636,13 +636,13 @@ define void @vector_init_vlmax(i64 %N, ptr %c) {
 ; CHECK-NEXT:    blez a0, .LBB12_3
 ; CHECK-NEXT:  # %bb.1: # %for.body.preheader
 ; CHECK-NEXT:    li a3, 0
-; CHECK-NEXT:    slli a4, a2, 3
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:  .LBB12_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vse64.v v8, (a1)
+; CHECK-NEXT:    slli a4, a3, 3
+; CHECK-NEXT:    add a4, a4, a1
 ; CHECK-NEXT:    add a3, a3, a2
-; CHECK-NEXT:    add a1, a1, a4
+; CHECK-NEXT:    vse64.v v8, (a4)
 ; CHECK-NEXT:    blt a3, a0, .LBB12_2
 ; CHECK-NEXT:  .LBB12_3: # %for.end
 ; CHECK-NEXT:    ret
@@ -672,15 +672,15 @@ define void @vector_init_vsetvli_N(i64 %N, ptr %c) {
 ; CHECK-NEXT:    blez a0, .LBB13_3
 ; CHECK-NEXT:  # %bb.1: # %for.body.preheader
 ; CHECK-NEXT:    li a3, 0
-; CHECK-NEXT:    slli a4, a2, 3
-; CHECK-NEXT:    vsetvli a5, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vsetvli a4, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:  .LBB13_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    slli a4, a3, 3
+; CHECK-NEXT:    add a4, a4, a1
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vse64.v v8, (a1)
 ; CHECK-NEXT:    add a3, a3, a2
-; CHECK-NEXT:    add a1, a1, a4
+; CHECK-NEXT:    vse64.v v8, (a4)
 ; CHECK-NEXT:    blt a3, a0, .LBB13_2
 ; CHECK-NEXT:  .LBB13_3: # %for.end
 ; CHECK-NEXT:    ret
@@ -708,15 +708,15 @@ define void @vector_init_vsetvli_fv(i64 %N, ptr %c) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li a2, 0
 ; CHECK-NEXT:    vsetivli a3, 4, e64, m1, ta, ma
-; CHECK-NEXT:    slli a4, a3, 3
-; CHECK-NEXT:    vsetvli a5, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vsetvli a4, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:  .LBB14_1: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    slli a4, a2, 3
+; CHECK-NEXT:    add a4, a4, a1
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m1, ta, ma
-; CHECK-NEXT:    vse64.v v8, (a1)
 ; CHECK-NEXT:    add a2, a2, a3
-; CHECK-NEXT:    add a1, a1, a4
+; CHECK-NEXT:    vse64.v v8, (a4)
 ; CHECK-NEXT:    blt a2, a0, .LBB14_1
 ; CHECK-NEXT:  # %bb.2: # %for.end
 ; CHECK-NEXT:    ret



More information about the llvm-commits mailing list