[llvm] MTM: improve operand latency when missing sched info (PR #101389)
Ramkumar Ramachandra via llvm-commits
llvm-commits at lists.llvm.org
Fri Aug 2 07:12:06 PDT 2024
https://github.com/artagnon updated https://github.com/llvm/llvm-project/pull/101389
From 1f6026b726145fb2d2f5aa2d1edf02e2b5d74eb4 Mon Sep 17 00:00:00 2001
From: Ramkumar Ramachandra <ramkumar.ramachandra at codasip.com>
Date: Wed, 31 Jul 2024 16:49:45 +0100
Subject: [PATCH 1/4] MTM: improve operand latency when missing sched info
TargetSchedModel::computeOperandLatency is supposed to return the exact
latency between two MIs, but InstrSchedModel and InstrItineraries are
often unavailable in real-world scenarios. When neither piece of
information is available, the function returns a much too conservative
estimate: the default def latency. MachineTraceMetrics is one of the
callers affected quite badly by these conservative estimates. To improve
the estimate, and to let callers of MTM generate better code, offset the
default def latency by the estimated number of cycles elapsed between
the def MI and the use MI. Since we are trying to improve codegen
precisely when no scheduling information is available, the exact number
of cycles elapsed between the two MIs cannot be determined, so we use
the distance between them as a crude approximation. In practice,
offsetting one crude estimate with another leads to better codegen on
average, and yields huge gains on standard benchmarks.
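As a rough illustration (not part of the patch; the helper name and
parameters are simplified placeholders), the adjustment amounts to the
following clamped subtraction:

    // Sketch only: offset the default def latency by the estimated
    // cycles already elapsed between the def and the use, clamping at
    // zero so a large distance never produces a negative latency.
    static unsigned adjustDefLatency(unsigned DefaultDefLatency,
                                     unsigned DefUseDist) {
      return DefaultDefLatency > DefUseDist ? DefaultDefLatency - DefUseDist
                                            : 0;
    }

The follow-up patches below refine the distance from a raw instruction
count into a micro-op count divided by the issue width.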
---
llvm/lib/CodeGen/MachineTraceMetrics.cpp | 57 ++++++++++++++++++++----
1 file changed, 49 insertions(+), 8 deletions(-)
diff --git a/llvm/lib/CodeGen/MachineTraceMetrics.cpp b/llvm/lib/CodeGen/MachineTraceMetrics.cpp
index bf3add010574b..84bbdb008f8f3 100644
--- a/llvm/lib/CodeGen/MachineTraceMetrics.cpp
+++ b/llvm/lib/CodeGen/MachineTraceMetrics.cpp
@@ -20,6 +20,7 @@
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
@@ -761,6 +762,46 @@ static void updatePhysDepsDownwards(const MachineInstr *UseMI,
}
}
+/// Returns the distance between DefMI and UseMI if they're non-null and in the
+/// same BasicBlock, 0 otherwise.
+static unsigned computeDefUseDist(const MachineInstr *DefMI,
+ const MachineInstr *UseMI) {
+ if (!DefMI || !UseMI || DefMI == UseMI)
+ return 0;
+ const MachineBasicBlock *ParentBB = DefMI->getParent();
+ if (ParentBB != UseMI->getParent())
+ return 0;
+ auto DefIt = llvm::find_if(
+ *ParentBB, [DefMI](const MachineInstr &MI) { return DefMI == &MI; });
+ auto UseIt = llvm::find_if(
+ *ParentBB, [UseMI](const MachineInstr &MI) { return UseMI == &MI; });
+ return std::distance(DefIt, UseIt);
+}
+
+/// Wraps Sched.computeOperandLatency, accounting for the case when
+/// InstrSchedModel and InstrItineraries are not available: in this case,
+/// Sched.computeOperandLatency returns DefaultDefLatency, which is a very rough
+/// approximate; to improve this approximate, offset it by the approximate
+/// cycles elapsed from DefMI to UseMI (since the MIs could be re-ordered by the
+/// scheduler, and we don't have this information, this distance cannot be known
+/// exactly). When scheduling information is available,
+/// Sched.computeOperandLatency returns a much better estimate (especially if
+/// UseMI is non-null), so we just return that.
+static unsigned computeOperandLatency(const TargetSchedModel &Sched,
+ const MachineInstr *DefMI,
+ unsigned DefOperIdx,
+ const MachineInstr *UseMI,
+ unsigned UseOperIdx) {
+ assert(DefMI && "Non-null DefMI expected");
+ if (!Sched.hasInstrSchedModel() && !Sched.hasInstrItineraries()) {
+ unsigned DefaultDefLatency = Sched.getInstrInfo()->defaultDefLatency(
+ *Sched.getMCSchedModel(), *DefMI);
+ unsigned DefUseDist = computeDefUseDist(DefMI, UseMI);
+ return DefaultDefLatency > DefUseDist ? DefaultDefLatency - DefUseDist : 0;
+ }
+ return Sched.computeOperandLatency(DefMI, DefOperIdx, UseMI, UseOperIdx);
+}
+
/// The length of the critical path through a trace is the maximum of two path
/// lengths:
///
@@ -813,8 +854,8 @@ updateDepth(MachineTraceMetrics::TraceBlockInfo &TBI, const MachineInstr &UseMI,
unsigned DepCycle = Cycles.lookup(Dep.DefMI).Depth;
// Add latency if DefMI is a real instruction. Transients get latency 0.
if (!Dep.DefMI->isTransient())
- DepCycle += MTM.SchedModel
- .computeOperandLatency(Dep.DefMI, Dep.DefOp, &UseMI, Dep.UseOp);
+ DepCycle += computeOperandLatency(MTM.SchedModel, Dep.DefMI, Dep.DefOp,
+ &UseMI, Dep.UseOp);
Cycle = std::max(Cycle, DepCycle);
}
// Remember the instruction depth.
@@ -929,8 +970,8 @@ static unsigned updatePhysDepsUpwards(const MachineInstr &MI, unsigned Height,
if (!MI.isTransient()) {
// We may not know the UseMI of this dependency, if it came from the
// live-in list. SchedModel can handle a NULL UseMI.
- DepHeight += SchedModel.computeOperandLatency(&MI, MO.getOperandNo(),
- I->MI, I->Op);
+ DepHeight += computeOperandLatency(SchedModel, &MI, MO.getOperandNo(),
+ I->MI, I->Op);
}
Height = std::max(Height, DepHeight);
// This regunit is dead above MI.
@@ -965,8 +1006,8 @@ static bool pushDepHeight(const DataDep &Dep, const MachineInstr &UseMI,
const TargetInstrInfo *TII) {
// Adjust height by Dep.DefMI latency.
if (!Dep.DefMI->isTransient())
- UseHeight += SchedModel.computeOperandLatency(Dep.DefMI, Dep.DefOp, &UseMI,
- Dep.UseOp);
+ UseHeight += computeOperandLatency(SchedModel, Dep.DefMI, Dep.DefOp, &UseMI,
+ Dep.UseOp);
// Update Heights[DefMI] to be the maximum height seen.
MIHeightMap::iterator I;
@@ -1192,8 +1233,8 @@ MachineTraceMetrics::Trace::getPHIDepth(const MachineInstr &PHI) const {
unsigned DepCycle = getInstrCycles(*Dep.DefMI).Depth;
// Add latency if DefMI is a real instruction. Transients get latency 0.
if (!Dep.DefMI->isTransient())
- DepCycle += TE.MTM.SchedModel.computeOperandLatency(Dep.DefMI, Dep.DefOp,
- &PHI, Dep.UseOp);
+ DepCycle += computeOperandLatency(TE.MTM.SchedModel, Dep.DefMI, Dep.DefOp,
+ &PHI, Dep.UseOp);
return DepCycle;
}
From 691e104086a9b107c55a3503a897693338b1a554 Mon Sep 17 00:00:00 2001
From: Ramkumar Ramachandra <ramkumar.ramachandra at codasip.com>
Date: Thu, 1 Aug 2024 12:29:35 +0100
Subject: [PATCH 2/4] MTM: account for IssueWidth; improve patch
---
llvm/lib/CodeGen/MachineTraceMetrics.cpp | 44 +++++++++++++++++-------
1 file changed, 31 insertions(+), 13 deletions(-)
diff --git a/llvm/lib/CodeGen/MachineTraceMetrics.cpp b/llvm/lib/CodeGen/MachineTraceMetrics.cpp
index 84bbdb008f8f3..50956ae37c4c1 100644
--- a/llvm/lib/CodeGen/MachineTraceMetrics.cpp
+++ b/llvm/lib/CodeGen/MachineTraceMetrics.cpp
@@ -762,20 +762,35 @@ static void updatePhysDepsDownwards(const MachineInstr *UseMI,
}
}
-/// Returns the distance between DefMI and UseMI if they're non-null and in the
-/// same BasicBlock, 0 otherwise.
-static unsigned computeDefUseDist(const MachineInstr *DefMI,
- const MachineInstr *UseMI) {
+/// Estimates the number of cycles elapsed between DefMI and UseMI if they're
+/// non-null and in the same BasicBlock. Returns std::nullopt when UseMI is in a
+/// different MBB than DefMI, or when it is a dangling MI.
+static std::optional<unsigned>
+estimateDefUseCycles(const TargetSchedModel &Sched, const MachineInstr *DefMI,
+ const MachineInstr *UseMI) {
if (!DefMI || !UseMI || DefMI == UseMI)
return 0;
const MachineBasicBlock *ParentBB = DefMI->getParent();
if (ParentBB != UseMI->getParent())
- return 0;
- auto DefIt = llvm::find_if(
- *ParentBB, [DefMI](const MachineInstr &MI) { return DefMI == &MI; });
- auto UseIt = llvm::find_if(
- *ParentBB, [UseMI](const MachineInstr &MI) { return UseMI == &MI; });
- return std::distance(DefIt, UseIt);
+ return std::nullopt;
+
+ const auto DefIt =
+ llvm::find_if(ParentBB->instrs(),
+ [DefMI](const MachineInstr &MI) { return DefMI == &MI; });
+ const auto UseIt =
+ llvm::find_if(ParentBB->instrs(),
+ [UseMI](const MachineInstr &MI) { return UseMI == &MI; });
+ assert(std::distance(DefIt, UseIt) > 0 &&
+ "Def expected to appear before use");
+ unsigned NumMicroOps = 0;
+ for (auto It = DefIt; It != UseIt; ++It) {
+ // In some cases, UseMI is a dangling MI beyond the end of the MBB.
+ if (It.isEnd())
+ return std::nullopt;
+
+ NumMicroOps += Sched.getNumMicroOps(&*It);
+ }
+ return NumMicroOps / Sched.getIssueWidth() - 1;
}
/// Wraps Sched.computeOperandLatency, accounting for the case when
@@ -783,7 +798,7 @@ static unsigned computeDefUseDist(const MachineInstr *DefMI,
/// Sched.computeOperandLatency returns DefaultDefLatency, which is a very rough
/// approximate; to improve this approximate, offset it by the approximate
/// cycles elapsed from DefMI to UseMI (since the MIs could be re-ordered by the
-/// scheduler, and we don't have this information, this distance cannot be known
+/// scheduler, and we don't have this information, this cannot be known
/// exactly). When scheduling information is available,
/// Sched.computeOperandLatency returns a much better estimate (especially if
/// UseMI is non-null), so we just return that.
@@ -796,8 +811,11 @@ static unsigned computeOperandLatency(const TargetSchedModel &Sched,
if (!Sched.hasInstrSchedModel() && !Sched.hasInstrItineraries()) {
unsigned DefaultDefLatency = Sched.getInstrInfo()->defaultDefLatency(
*Sched.getMCSchedModel(), *DefMI);
- unsigned DefUseDist = computeDefUseDist(DefMI, UseMI);
- return DefaultDefLatency > DefUseDist ? DefaultDefLatency - DefUseDist : 0;
+ std::optional<unsigned> DefUseCycles =
+ estimateDefUseCycles(Sched, DefMI, UseMI);
+ if (!DefUseCycles || DefaultDefLatency <= DefUseCycles)
+ return 0;
+ return DefaultDefLatency - *DefUseCycles;
}
return Sched.computeOperandLatency(DefMI, DefOperIdx, UseMI, UseOperIdx);
}
From 8f59a6ed19b5020b35f32e228a85d172409d6e91 Mon Sep 17 00:00:00 2001
From: Ramkumar Ramachandra <ramkumar.ramachandra at codasip.com>
Date: Fri, 2 Aug 2024 00:19:22 +0100
Subject: [PATCH 3/4] MTM: fix off-by-one error
---
llvm/lib/CodeGen/MachineTraceMetrics.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/CodeGen/MachineTraceMetrics.cpp b/llvm/lib/CodeGen/MachineTraceMetrics.cpp
index 50956ae37c4c1..f975672dfb73d 100644
--- a/llvm/lib/CodeGen/MachineTraceMetrics.cpp
+++ b/llvm/lib/CodeGen/MachineTraceMetrics.cpp
@@ -790,7 +790,7 @@ estimateDefUseCycles(const TargetSchedModel &Sched, const MachineInstr *DefMI,
NumMicroOps += Sched.getNumMicroOps(&*It);
}
- return NumMicroOps / Sched.getIssueWidth() - 1;
+ return NumMicroOps / Sched.getIssueWidth();
}
/// Wraps Sched.computeOperandLatency, accounting for the case when
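To illustrate the corrected estimate with hypothetical numbers: if the
instructions from DefMI up to (but not including) UseMI contribute four
micro-ops in total on a machine with issue width 2, estimateDefUseCycles
now returns 4 / 2 = 2; with a default def latency of 3, the wrapper
returns 3 - 2 = 1 cycle, whereas the previous off-by-one version would
have estimated only 1 elapsed cycle and returned 3 - 1 = 2.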
From 0b08c8a5e097c0b06f4f6e572fab7d184c6070bf Mon Sep 17 00:00:00 2001
From: Ramkumar Ramachandra <ramkumar.ramachandra at codasip.com>
Date: Fri, 2 Aug 2024 15:01:21 +0100
Subject: [PATCH 4/4] test/CodeGen/RISCV: update tests as example
---
.../test/CodeGen/RISCV/GlobalISel/bitmanip.ll | 44 ++--
llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll | 12 +-
llvm/test/CodeGen/RISCV/addcarry.ll | 4 +-
.../CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll | 4 +-
.../RISCV/calling-conv-ilp32-ilp32f-common.ll | 20 +-
...calling-conv-ilp32-ilp32f-ilp32d-common.ll | 28 +--
.../test/CodeGen/RISCV/calling-conv-ilp32e.ll | 56 ++---
.../calling-conv-lp64-lp64f-lp64d-common.ll | 14 +-
llvm/test/CodeGen/RISCV/compress.ll | 2 +-
llvm/test/CodeGen/RISCV/copysign-casts.ll | 8 +-
llvm/test/CodeGen/RISCV/div-pow2.ll | 10 +-
llvm/test/CodeGen/RISCV/float-intrinsics.ll | 20 +-
llvm/test/CodeGen/RISCV/iabs.ll | 40 +--
llvm/test/CodeGen/RISCV/machine-combiner.mir | 9 +-
.../CodeGen/RISCV/misched-load-clustering.ll | 6 +-
llvm/test/CodeGen/RISCV/mul.ll | 50 ++--
llvm/test/CodeGen/RISCV/neg-abs.ll | 12 +-
.../test/CodeGen/RISCV/reduction-formation.ll | 84 +++----
llvm/test/CodeGen/RISCV/rv32e.ll | 4 +-
llvm/test/CodeGen/RISCV/rv32zba.ll | 4 +-
llvm/test/CodeGen/RISCV/rv32zbb.ll | 2 +-
llvm/test/CodeGen/RISCV/rv64e.ll | 4 +-
llvm/test/CodeGen/RISCV/rv64zba.ll | 24 +-
llvm/test/CodeGen/RISCV/rvv/compressstore.ll | 2 +-
.../CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll | 32 +--
.../rvv/fixed-vectors-int-explodevector.ll | 234 +++++++++---------
.../rvv/fixed-vectors-reduction-formation.ll | 10 +-
.../CodeGen/RISCV/rvv/fixed-vectors-store.ll | 6 +-
.../rvv/fixed-vectors-strided-load-combine.ll | 4 +-
.../fixed-vectors-strided-load-store-asm.ll | 26 +-
.../RISCV/rvv/vector-reassociations.ll | 14 +-
.../CodeGen/RISCV/split-udiv-by-constant.ll | 4 +-
llvm/test/CodeGen/RISCV/srem-lkk.ll | 4 +-
.../CodeGen/RISCV/srem-seteq-illegal-types.ll | 42 ++--
llvm/test/CodeGen/RISCV/srem-vector-lkk.ll | 66 ++---
llvm/test/CodeGen/RISCV/urem-lkk.ll | 4 +-
.../CodeGen/RISCV/urem-seteq-illegal-types.ll | 28 +--
llvm/test/CodeGen/RISCV/urem-vector-lkk.ll | 38 +--
llvm/test/CodeGen/RISCV/xaluo.ll | 6 +-
llvm/test/CodeGen/RISCV/xtheadmac.ll | 6 +-
llvm/test/CodeGen/RISCV/xtheadmemidx.ll | 2 +-
41 files changed, 496 insertions(+), 493 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/bitmanip.ll b/llvm/test/CodeGen/RISCV/GlobalISel/bitmanip.ll
index 5c42fefb95b39..69261126cd8b0 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/bitmanip.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/bitmanip.ll
@@ -94,15 +94,15 @@ define i7 @bitreverse_i7(i7 %x) {
; RV32-NEXT: or a1, a1, a2
; RV32-NEXT: slli a2, a0, 2
; RV32-NEXT: andi a2, a2, 16
+; RV32-NEXT: or a1, a1, a2
; RV32-NEXT: andi a0, a0, 127
-; RV32-NEXT: andi a3, a0, 8
-; RV32-NEXT: or a2, a2, a3
+; RV32-NEXT: andi a2, a0, 8
; RV32-NEXT: or a1, a1, a2
; RV32-NEXT: srli a2, a0, 2
; RV32-NEXT: andi a2, a2, 4
-; RV32-NEXT: srli a3, a0, 4
-; RV32-NEXT: andi a3, a3, 2
-; RV32-NEXT: or a2, a2, a3
+; RV32-NEXT: or a1, a1, a2
+; RV32-NEXT: srli a2, a0, 4
+; RV32-NEXT: andi a2, a2, 2
; RV32-NEXT: or a1, a1, a2
; RV32-NEXT: srli a0, a0, 6
; RV32-NEXT: or a0, a1, a0
@@ -117,15 +117,15 @@ define i7 @bitreverse_i7(i7 %x) {
; RV64-NEXT: or a1, a1, a2
; RV64-NEXT: slli a2, a0, 2
; RV64-NEXT: andi a2, a2, 16
+; RV64-NEXT: or a1, a1, a2
; RV64-NEXT: andi a0, a0, 127
-; RV64-NEXT: andi a3, a0, 8
-; RV64-NEXT: or a2, a2, a3
+; RV64-NEXT: andi a2, a0, 8
; RV64-NEXT: or a1, a1, a2
; RV64-NEXT: srliw a2, a0, 2
; RV64-NEXT: andi a2, a2, 4
-; RV64-NEXT: srliw a3, a0, 4
-; RV64-NEXT: andi a3, a3, 2
-; RV64-NEXT: or a2, a2, a3
+; RV64-NEXT: or a1, a1, a2
+; RV64-NEXT: srliw a2, a0, 4
+; RV64-NEXT: andi a2, a2, 2
; RV64-NEXT: or a1, a1, a2
; RV64-NEXT: srliw a0, a0, 6
; RV64-NEXT: or a0, a1, a0
@@ -145,24 +145,24 @@ define i24 @bitreverse_i24(i24 %x) {
; RV32-NEXT: or a0, a0, a1
; RV32-NEXT: lui a1, 1048335
; RV32-NEXT: addi a1, a1, 240
-; RV32-NEXT: and a3, a1, a2
-; RV32-NEXT: and a3, a0, a3
+; RV32-NEXT: and a3, a0, a1
+; RV32-NEXT: and a3, a3, a2
; RV32-NEXT: srli a3, a3, 4
; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: and a0, a0, a1
; RV32-NEXT: or a0, a3, a0
; RV32-NEXT: lui a1, 1047757
; RV32-NEXT: addi a1, a1, -820
-; RV32-NEXT: and a3, a1, a2
-; RV32-NEXT: and a3, a0, a3
+; RV32-NEXT: and a3, a0, a1
+; RV32-NEXT: and a3, a3, a2
; RV32-NEXT: srli a3, a3, 2
; RV32-NEXT: slli a0, a0, 2
; RV32-NEXT: and a0, a0, a1
; RV32-NEXT: or a0, a3, a0
; RV32-NEXT: lui a1, 1047211
; RV32-NEXT: addi a1, a1, -1366
-; RV32-NEXT: and a2, a1, a2
-; RV32-NEXT: and a2, a0, a2
+; RV32-NEXT: and a3, a0, a1
+; RV32-NEXT: and a2, a3, a2
; RV32-NEXT: srli a2, a2, 1
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: and a0, a0, a1
@@ -179,24 +179,24 @@ define i24 @bitreverse_i24(i24 %x) {
; RV64-NEXT: or a0, a0, a1
; RV64-NEXT: lui a1, 1048335
; RV64-NEXT: addi a1, a1, 240
-; RV64-NEXT: and a3, a1, a2
-; RV64-NEXT: and a3, a0, a3
+; RV64-NEXT: and a3, a0, a1
+; RV64-NEXT: and a3, a3, a2
; RV64-NEXT: srliw a3, a3, 4
; RV64-NEXT: slli a0, a0, 4
; RV64-NEXT: and a0, a0, a1
; RV64-NEXT: or a0, a3, a0
; RV64-NEXT: lui a1, 1047757
; RV64-NEXT: addi a1, a1, -820
-; RV64-NEXT: and a3, a1, a2
-; RV64-NEXT: and a3, a0, a3
+; RV64-NEXT: and a3, a0, a1
+; RV64-NEXT: and a3, a3, a2
; RV64-NEXT: srliw a3, a3, 2
; RV64-NEXT: slli a0, a0, 2
; RV64-NEXT: and a0, a0, a1
; RV64-NEXT: or a0, a3, a0
; RV64-NEXT: lui a1, 1047211
; RV64-NEXT: addiw a1, a1, -1366
-; RV64-NEXT: and a2, a1, a2
-; RV64-NEXT: and a2, a0, a2
+; RV64-NEXT: and a3, a0, a1
+; RV64-NEXT: and a2, a3, a2
; RV64-NEXT: srliw a2, a2, 1
; RV64-NEXT: slliw a0, a0, 1
; RV64-NEXT: and a0, a0, a1
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll b/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll
index d55adf371119b..5723c4b9197a6 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll
@@ -1266,8 +1266,8 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
; RV32-NEXT: sw a3, 4(sp)
; RV32-NEXT: lw a2, 0(a2)
; RV32-NEXT: add a0, a0, s0
-; RV32-NEXT: add a1, a1, a2
; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, a0, a2
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 48
@@ -1319,8 +1319,8 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
; RV64-NEXT: sd a3, 8(sp)
; RV64-NEXT: lw a2, 0(a2)
; RV64-NEXT: add a0, a0, s0
-; RV64-NEXT: add a1, a1, a2
-; RV64-NEXT: addw a0, a0, a1
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: addw a0, a0, a2
; RV64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 96
@@ -1371,8 +1371,8 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
; RV32-WITHFP-NEXT: sw a3, -16(s0)
; RV32-WITHFP-NEXT: lw a2, 0(a2)
; RV32-WITHFP-NEXT: add a0, a0, s1
-; RV32-WITHFP-NEXT: add a1, a1, a2
; RV32-WITHFP-NEXT: add a0, a0, a1
+; RV32-WITHFP-NEXT: add a0, a0, a2
; RV32-WITHFP-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-WITHFP-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32-WITHFP-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -1427,8 +1427,8 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
; RV64-WITHFP-NEXT: sd a3, -32(s0)
; RV64-WITHFP-NEXT: lw a2, 0(a2)
; RV64-WITHFP-NEXT: add a0, a0, s1
-; RV64-WITHFP-NEXT: add a1, a1, a2
-; RV64-WITHFP-NEXT: addw a0, a0, a1
+; RV64-WITHFP-NEXT: add a0, a0, a1
+; RV64-WITHFP-NEXT: addw a0, a0, a2
; RV64-WITHFP-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64-WITHFP-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; RV64-WITHFP-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/addcarry.ll b/llvm/test/CodeGen/RISCV/addcarry.ll
index 3a4163a8bb50f..053b98755417b 100644
--- a/llvm/test/CodeGen/RISCV/addcarry.ll
+++ b/llvm/test/CodeGen/RISCV/addcarry.ll
@@ -18,9 +18,9 @@ define i64 @addcarry(i64 %x, i64 %y) nounwind {
; RISCV32-NEXT: sltu a7, a4, a6
; RISCV32-NEXT: sltu a5, a6, a5
; RISCV32-NEXT: mulhu a6, a0, a3
-; RISCV32-NEXT: mulhu t0, a1, a2
-; RISCV32-NEXT: add a6, a6, t0
; RISCV32-NEXT: add a5, a6, a5
+; RISCV32-NEXT: mulhu a6, a1, a2
+; RISCV32-NEXT: add a5, a5, a6
; RISCV32-NEXT: add a5, a5, a7
; RISCV32-NEXT: mul a6, a1, a3
; RISCV32-NEXT: add a5, a5, a6
diff --git a/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll b/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll
index 634ed45044ee2..672625c182d0b 100644
--- a/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll
+++ b/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll
@@ -227,8 +227,8 @@ define i16 @atomicrmw_uinc_wrap_i16(ptr %ptr, i16 %val) {
; RV32IA-NEXT: addi a5, a5, 1
; RV32IA-NEXT: sltu a7, a7, a1
; RV32IA-NEXT: neg a7, a7
-; RV32IA-NEXT: and a5, a5, a3
; RV32IA-NEXT: and a5, a7, a5
+; RV32IA-NEXT: and a5, a5, a3
; RV32IA-NEXT: sll a5, a5, a0
; RV32IA-NEXT: and a7, a6, a4
; RV32IA-NEXT: or a7, a7, a5
@@ -307,8 +307,8 @@ define i16 @atomicrmw_uinc_wrap_i16(ptr %ptr, i16 %val) {
; RV64IA-NEXT: addi a6, a6, 1
; RV64IA-NEXT: sltu t0, t0, a1
; RV64IA-NEXT: negw t0, t0
-; RV64IA-NEXT: and a6, a6, a3
; RV64IA-NEXT: and a6, t0, a6
+; RV64IA-NEXT: and a6, a6, a3
; RV64IA-NEXT: sllw a6, a6, a0
; RV64IA-NEXT: and a4, a4, a5
; RV64IA-NEXT: or a6, a4, a6
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll
index 278187f62cd75..8bcdb059a95fb 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll
@@ -94,15 +94,15 @@ define i32 @callee_aligned_stack(i32 %a, i32 %b, fp128 %c, i32 %d, i32 %e, i64 %
; RV32I-FPELIM-LABEL: callee_aligned_stack:
; RV32I-FPELIM: # %bb.0:
; RV32I-FPELIM-NEXT: lw a0, 0(a2)
-; RV32I-FPELIM-NEXT: lw a1, 8(sp)
+; RV32I-FPELIM-NEXT: lw a1, 20(sp)
; RV32I-FPELIM-NEXT: lw a2, 0(sp)
-; RV32I-FPELIM-NEXT: lw a3, 20(sp)
+; RV32I-FPELIM-NEXT: lw a3, 8(sp)
; RV32I-FPELIM-NEXT: lw a4, 16(sp)
; RV32I-FPELIM-NEXT: add a0, a0, a7
-; RV32I-FPELIM-NEXT: add a1, a2, a1
-; RV32I-FPELIM-NEXT: add a0, a0, a1
-; RV32I-FPELIM-NEXT: add a3, a4, a3
+; RV32I-FPELIM-NEXT: add a0, a0, a2
; RV32I-FPELIM-NEXT: add a0, a0, a3
+; RV32I-FPELIM-NEXT: add a0, a0, a4
+; RV32I-FPELIM-NEXT: add a0, a0, a1
; RV32I-FPELIM-NEXT: ret
;
; RV32I-WITHFP-LABEL: callee_aligned_stack:
@@ -112,15 +112,15 @@ define i32 @callee_aligned_stack(i32 %a, i32 %b, fp128 %c, i32 %d, i32 %e, i64 %
; RV32I-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-WITHFP-NEXT: addi s0, sp, 16
; RV32I-WITHFP-NEXT: lw a0, 0(a2)
-; RV32I-WITHFP-NEXT: lw a1, 8(s0)
+; RV32I-WITHFP-NEXT: lw a1, 20(s0)
; RV32I-WITHFP-NEXT: lw a2, 0(s0)
-; RV32I-WITHFP-NEXT: lw a3, 20(s0)
+; RV32I-WITHFP-NEXT: lw a3, 8(s0)
; RV32I-WITHFP-NEXT: lw a4, 16(s0)
; RV32I-WITHFP-NEXT: add a0, a0, a7
-; RV32I-WITHFP-NEXT: add a1, a2, a1
-; RV32I-WITHFP-NEXT: add a0, a0, a1
-; RV32I-WITHFP-NEXT: add a3, a4, a3
+; RV32I-WITHFP-NEXT: add a0, a0, a2
; RV32I-WITHFP-NEXT: add a0, a0, a3
+; RV32I-WITHFP-NEXT: add a0, a0, a4
+; RV32I-WITHFP-NEXT: add a0, a0, a1
; RV32I-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
index 231ed159ab206..4906cc8eb73a5 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
@@ -87,16 +87,16 @@ define i32 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i64 %d, i32 %e, i32 %f, i
; RV32I-FPELIM-NEXT: andi a0, a0, 255
; RV32I-FPELIM-NEXT: slli a1, a1, 16
; RV32I-FPELIM-NEXT: srli a1, a1, 16
-; RV32I-FPELIM-NEXT: add a0, a0, a2
; RV32I-FPELIM-NEXT: add a0, a0, a1
+; RV32I-FPELIM-NEXT: add a0, a0, a2
; RV32I-FPELIM-NEXT: xor a1, a4, t1
; RV32I-FPELIM-NEXT: xor a2, a3, a7
; RV32I-FPELIM-NEXT: or a1, a2, a1
; RV32I-FPELIM-NEXT: seqz a1, a1
+; RV32I-FPELIM-NEXT: add a0, a1, a0
; RV32I-FPELIM-NEXT: add a0, a0, a5
; RV32I-FPELIM-NEXT: add a0, a0, a6
; RV32I-FPELIM-NEXT: add a0, a0, t0
-; RV32I-FPELIM-NEXT: add a0, a1, a0
; RV32I-FPELIM-NEXT: ret
;
; RV32I-WITHFP-LABEL: callee_many_scalars:
@@ -110,16 +110,16 @@ define i32 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i64 %d, i32 %e, i32 %f, i
; RV32I-WITHFP-NEXT: andi a0, a0, 255
; RV32I-WITHFP-NEXT: slli a1, a1, 16
; RV32I-WITHFP-NEXT: srli a1, a1, 16
-; RV32I-WITHFP-NEXT: add a0, a0, a2
; RV32I-WITHFP-NEXT: add a0, a0, a1
+; RV32I-WITHFP-NEXT: add a0, a0, a2
; RV32I-WITHFP-NEXT: xor a1, a4, t1
; RV32I-WITHFP-NEXT: xor a2, a3, a7
; RV32I-WITHFP-NEXT: or a1, a2, a1
; RV32I-WITHFP-NEXT: seqz a1, a1
+; RV32I-WITHFP-NEXT: add a0, a1, a0
; RV32I-WITHFP-NEXT: add a0, a0, a5
; RV32I-WITHFP-NEXT: add a0, a0, a6
; RV32I-WITHFP-NEXT: add a0, a0, t0
-; RV32I-WITHFP-NEXT: add a0, a1, a0
; RV32I-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: addi sp, sp, 16
@@ -614,15 +614,15 @@ define i32 @callee_aligned_stack(i32 %a, i32 %b, fp128 %c, i32 %d, i32 %e, i64 %
; RV32I-FPELIM-LABEL: callee_aligned_stack:
; RV32I-FPELIM: # %bb.0:
; RV32I-FPELIM-NEXT: lw a0, 0(a2)
-; RV32I-FPELIM-NEXT: lw a1, 8(sp)
+; RV32I-FPELIM-NEXT: lw a1, 20(sp)
; RV32I-FPELIM-NEXT: lw a2, 0(sp)
-; RV32I-FPELIM-NEXT: lw a3, 20(sp)
+; RV32I-FPELIM-NEXT: lw a3, 8(sp)
; RV32I-FPELIM-NEXT: lw a4, 16(sp)
; RV32I-FPELIM-NEXT: add a0, a0, a7
-; RV32I-FPELIM-NEXT: add a1, a2, a1
-; RV32I-FPELIM-NEXT: add a0, a0, a1
-; RV32I-FPELIM-NEXT: add a3, a4, a3
+; RV32I-FPELIM-NEXT: add a0, a0, a2
; RV32I-FPELIM-NEXT: add a0, a0, a3
+; RV32I-FPELIM-NEXT: add a0, a0, a4
+; RV32I-FPELIM-NEXT: add a0, a0, a1
; RV32I-FPELIM-NEXT: ret
;
; RV32I-WITHFP-LABEL: callee_aligned_stack:
@@ -632,15 +632,15 @@ define i32 @callee_aligned_stack(i32 %a, i32 %b, fp128 %c, i32 %d, i32 %e, i64 %
; RV32I-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-WITHFP-NEXT: addi s0, sp, 16
; RV32I-WITHFP-NEXT: lw a0, 0(a2)
-; RV32I-WITHFP-NEXT: lw a1, 8(s0)
+; RV32I-WITHFP-NEXT: lw a1, 20(s0)
; RV32I-WITHFP-NEXT: lw a2, 0(s0)
-; RV32I-WITHFP-NEXT: lw a3, 20(s0)
+; RV32I-WITHFP-NEXT: lw a3, 8(s0)
; RV32I-WITHFP-NEXT: lw a4, 16(s0)
; RV32I-WITHFP-NEXT: add a0, a0, a7
-; RV32I-WITHFP-NEXT: add a1, a2, a1
-; RV32I-WITHFP-NEXT: add a0, a0, a1
-; RV32I-WITHFP-NEXT: add a3, a4, a3
+; RV32I-WITHFP-NEXT: add a0, a0, a2
; RV32I-WITHFP-NEXT: add a0, a0, a3
+; RV32I-WITHFP-NEXT: add a0, a0, a4
+; RV32I-WITHFP-NEXT: add a0, a0, a1
; RV32I-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32e.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32e.ll
index d08cf577b1bdd..6969186999766 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32e.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32e.ll
@@ -529,16 +529,16 @@ define i32 @callee_aligned_stack(i32 %a, i32 %b, fp128 %c, i32 %d, i32 %e, i64 %
; ILP32E-FPELIM-LABEL: callee_aligned_stack:
; ILP32E-FPELIM: # %bb.0:
; ILP32E-FPELIM-NEXT: lw a0, 0(a2)
-; ILP32E-FPELIM-NEXT: lw a1, 12(sp)
+; ILP32E-FPELIM-NEXT: lw a1, 24(sp)
; ILP32E-FPELIM-NEXT: lw a2, 4(sp)
; ILP32E-FPELIM-NEXT: lw a3, 8(sp)
-; ILP32E-FPELIM-NEXT: lw a4, 24(sp)
+; ILP32E-FPELIM-NEXT: lw a4, 12(sp)
; ILP32E-FPELIM-NEXT: lw a5, 20(sp)
; ILP32E-FPELIM-NEXT: add a0, a0, a2
-; ILP32E-FPELIM-NEXT: add a1, a3, a1
-; ILP32E-FPELIM-NEXT: add a0, a0, a1
-; ILP32E-FPELIM-NEXT: add a4, a5, a4
+; ILP32E-FPELIM-NEXT: add a0, a0, a3
; ILP32E-FPELIM-NEXT: add a0, a0, a4
+; ILP32E-FPELIM-NEXT: add a0, a0, a5
+; ILP32E-FPELIM-NEXT: add a0, a0, a1
; ILP32E-FPELIM-NEXT: ret
;
; ILP32E-WITHFP-LABEL: callee_aligned_stack:
@@ -552,16 +552,16 @@ define i32 @callee_aligned_stack(i32 %a, i32 %b, fp128 %c, i32 %d, i32 %e, i64 %
; ILP32E-WITHFP-NEXT: addi s0, sp, 8
; ILP32E-WITHFP-NEXT: .cfi_def_cfa s0, 0
; ILP32E-WITHFP-NEXT: lw a0, 0(a2)
-; ILP32E-WITHFP-NEXT: lw a1, 12(s0)
+; ILP32E-WITHFP-NEXT: lw a1, 24(s0)
; ILP32E-WITHFP-NEXT: lw a2, 4(s0)
; ILP32E-WITHFP-NEXT: lw a3, 8(s0)
-; ILP32E-WITHFP-NEXT: lw a4, 24(s0)
+; ILP32E-WITHFP-NEXT: lw a4, 12(s0)
; ILP32E-WITHFP-NEXT: lw a5, 20(s0)
; ILP32E-WITHFP-NEXT: add a0, a0, a2
-; ILP32E-WITHFP-NEXT: add a1, a3, a1
-; ILP32E-WITHFP-NEXT: add a0, a0, a1
-; ILP32E-WITHFP-NEXT: add a4, a5, a4
+; ILP32E-WITHFP-NEXT: add a0, a0, a3
; ILP32E-WITHFP-NEXT: add a0, a0, a4
+; ILP32E-WITHFP-NEXT: add a0, a0, a5
+; ILP32E-WITHFP-NEXT: add a0, a0, a1
; ILP32E-WITHFP-NEXT: lw ra, 4(sp) # 4-byte Folded Reload
; ILP32E-WITHFP-NEXT: lw s0, 0(sp) # 4-byte Folded Reload
; ILP32E-WITHFP-NEXT: addi sp, sp, 8
@@ -570,16 +570,16 @@ define i32 @callee_aligned_stack(i32 %a, i32 %b, fp128 %c, i32 %d, i32 %e, i64 %
; ILP32E-FPELIM-SAVE-RESTORE-LABEL: callee_aligned_stack:
; ILP32E-FPELIM-SAVE-RESTORE: # %bb.0:
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: lw a0, 0(a2)
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: lw a1, 12(sp)
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: lw a1, 24(sp)
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: lw a2, 4(sp)
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: lw a3, 8(sp)
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: lw a4, 24(sp)
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: lw a4, 12(sp)
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: lw a5, 20(sp)
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: add a0, a0, a2
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: add a1, a3, a1
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: add a0, a0, a1
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: add a4, a5, a4
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: add a0, a0, a3
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: add a0, a0, a4
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: add a0, a0, a5
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: add a0, a0, a1
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: ret
;
; ILP32E-WITHFP-SAVE-RESTORE-LABEL: callee_aligned_stack:
@@ -591,16 +591,16 @@ define i32 @callee_aligned_stack(i32 %a, i32 %b, fp128 %c, i32 %d, i32 %e, i64 %
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: addi s0, sp, 8
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: .cfi_def_cfa s0, 0
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: lw a0, 0(a2)
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: lw a1, 12(s0)
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: lw a1, 24(s0)
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: lw a2, 4(s0)
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: lw a3, 8(s0)
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: lw a4, 24(s0)
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: lw a4, 12(s0)
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: lw a5, 20(s0)
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: add a0, a0, a2
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: add a1, a3, a1
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: add a0, a0, a1
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: add a4, a5, a4
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: add a0, a0, a3
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: add a0, a0, a4
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: add a0, a0, a5
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: add a0, a0, a1
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: tail __riscv_restore_1
%1 = bitcast fp128 %c to i128
%2 = trunc i128 %1 to i32
@@ -1052,16 +1052,16 @@ define i32 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i64 %d, i32 %e, i32 %f, i
; ILP32E-FPELIM-NEXT: andi a0, a0, 255
; ILP32E-FPELIM-NEXT: slli a1, a1, 16
; ILP32E-FPELIM-NEXT: srli a1, a1, 16
-; ILP32E-FPELIM-NEXT: add a0, a0, a2
; ILP32E-FPELIM-NEXT: add a0, a0, a1
+; ILP32E-FPELIM-NEXT: add a0, a0, a2
; ILP32E-FPELIM-NEXT: xor a1, a4, t1
; ILP32E-FPELIM-NEXT: xor a2, a3, t0
; ILP32E-FPELIM-NEXT: or a1, a2, a1
; ILP32E-FPELIM-NEXT: seqz a1, a1
+; ILP32E-FPELIM-NEXT: add a0, a1, a0
; ILP32E-FPELIM-NEXT: add a0, a0, a5
; ILP32E-FPELIM-NEXT: add a0, a0, a7
; ILP32E-FPELIM-NEXT: add a0, a0, a6
-; ILP32E-FPELIM-NEXT: add a0, a1, a0
; ILP32E-FPELIM-NEXT: ret
;
; ILP32E-WITHFP-LABEL: callee_many_scalars:
@@ -1081,16 +1081,16 @@ define i32 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i64 %d, i32 %e, i32 %f, i
; ILP32E-WITHFP-NEXT: andi a0, a0, 255
; ILP32E-WITHFP-NEXT: slli a1, a1, 16
; ILP32E-WITHFP-NEXT: srli a1, a1, 16
-; ILP32E-WITHFP-NEXT: add a0, a0, a2
; ILP32E-WITHFP-NEXT: add a0, a0, a1
+; ILP32E-WITHFP-NEXT: add a0, a0, a2
; ILP32E-WITHFP-NEXT: xor a1, a4, t1
; ILP32E-WITHFP-NEXT: xor a2, a3, t0
; ILP32E-WITHFP-NEXT: or a1, a2, a1
; ILP32E-WITHFP-NEXT: seqz a1, a1
+; ILP32E-WITHFP-NEXT: add a0, a1, a0
; ILP32E-WITHFP-NEXT: add a0, a0, a5
; ILP32E-WITHFP-NEXT: add a0, a0, a7
; ILP32E-WITHFP-NEXT: add a0, a0, a6
-; ILP32E-WITHFP-NEXT: add a0, a1, a0
; ILP32E-WITHFP-NEXT: lw ra, 4(sp) # 4-byte Folded Reload
; ILP32E-WITHFP-NEXT: lw s0, 0(sp) # 4-byte Folded Reload
; ILP32E-WITHFP-NEXT: addi sp, sp, 8
@@ -1105,16 +1105,16 @@ define i32 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i64 %d, i32 %e, i32 %f, i
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: andi a0, a0, 255
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: slli a1, a1, 16
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: srli a1, a1, 16
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: add a0, a0, a2
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: add a0, a0, a1
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: add a0, a0, a2
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: xor a1, a4, t1
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: xor a2, a3, t0
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: or a1, a2, a1
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: seqz a1, a1
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: add a0, a1, a0
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: add a0, a0, a5
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: add a0, a0, a7
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: add a0, a0, a6
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: add a0, a1, a0
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: ret
;
; ILP32E-WITHFP-SAVE-RESTORE-LABEL: callee_many_scalars:
@@ -1132,16 +1132,16 @@ define i32 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i64 %d, i32 %e, i32 %f, i
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: andi a0, a0, 255
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: slli a1, a1, 16
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: srli a1, a1, 16
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: add a0, a0, a2
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: add a0, a0, a1
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: add a0, a0, a2
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: xor a1, a4, t1
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: xor a2, a3, t0
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: or a1, a2, a1
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: seqz a1, a1
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: add a0, a1, a0
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: add a0, a0, a5
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: add a0, a0, a7
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: add a0, a0, a6
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: add a0, a1, a0
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: tail __riscv_restore_1
%a_ext = zext i8 %a to i32
%b_ext = zext i16 %b to i32
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
index 67123466354c4..3e640e9ad8e18 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
@@ -53,16 +53,16 @@ define i32 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i128 %d, i32 %e, i32 %f,
; RV64I-NEXT: andi a0, a0, 255
; RV64I-NEXT: slli a1, a1, 48
; RV64I-NEXT: srli a1, a1, 48
-; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: xor a1, a4, t1
; RV64I-NEXT: xor a2, a3, a7
; RV64I-NEXT: or a1, a2, a1
; RV64I-NEXT: seqz a1, a1
+; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: add a0, a0, a5
; RV64I-NEXT: add a0, a0, a6
-; RV64I-NEXT: add a0, a0, t0
-; RV64I-NEXT: addw a0, a1, a0
+; RV64I-NEXT: addw a0, a0, t0
; RV64I-NEXT: ret
%a_ext = zext i8 %a to i32
%b_ext = zext i16 %b to i32
@@ -328,15 +328,15 @@ define i64 @callee_aligned_stack(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i128 %f
; should only be 8-byte aligned
; RV64I-LABEL: callee_aligned_stack:
; RV64I: # %bb.0:
-; RV64I-NEXT: ld a0, 32(sp)
+; RV64I-NEXT: ld a0, 40(sp)
; RV64I-NEXT: ld a1, 0(sp)
; RV64I-NEXT: ld a2, 16(sp)
-; RV64I-NEXT: ld a3, 40(sp)
+; RV64I-NEXT: ld a3, 32(sp)
; RV64I-NEXT: add a5, a5, a7
; RV64I-NEXT: add a1, a5, a1
-; RV64I-NEXT: add a0, a2, a0
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: add a1, a1, a3
; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: add a0, a0, a3
; RV64I-NEXT: ret
%f_trunc = trunc i128 %f to i64
%1 = add i64 %f_trunc, %g
diff --git a/llvm/test/CodeGen/RISCV/compress.ll b/llvm/test/CodeGen/RISCV/compress.ll
index 8fb520fac41ee..e461ef5ad679d 100644
--- a/llvm/test/CodeGen/RISCV/compress.ll
+++ b/llvm/test/CodeGen/RISCV/compress.ll
@@ -35,8 +35,8 @@ define i32 @simple_arith(i32 %a, i32 %b) #0 {
; RV32IC-NEXT: c.andi a2, 0xb
; RV32IC-NEXT: c.slli a2, 0x7
; RV32IC-NEXT: c.srai a1, 0x9
+; RV32IC-NEXT: c.add a1, a2
; RV32IC-NEXT: sub a0, a1, a0
-; RV32IC-NEXT: c.add a0, a2
; RV32IC-NEXT: c.jr ra
%1 = add i32 %a, 1
%2 = and i32 %1, 11
diff --git a/llvm/test/CodeGen/RISCV/copysign-casts.ll b/llvm/test/CodeGen/RISCV/copysign-casts.ll
index accd52369fa1c..4cddd70d4d977 100644
--- a/llvm/test/CodeGen/RISCV/copysign-casts.ll
+++ b/llvm/test/CodeGen/RISCV/copysign-casts.ll
@@ -598,9 +598,9 @@ define half @fold_demote_h_d(half %a, double %b) nounwind {
; RV32IFD-NEXT: srli a0, a0, 16
; RV32IFD-NEXT: slli a1, a1, 17
; RV32IFD-NEXT: srli a1, a1, 17
-; RV32IFD-NEXT: lui a2, 1048560
-; RV32IFD-NEXT: or a1, a1, a2
; RV32IFD-NEXT: or a0, a1, a0
+; RV32IFD-NEXT: lui a1, 1048560
+; RV32IFD-NEXT: or a0, a0, a1
; RV32IFD-NEXT: fmv.w.x fa0, a0
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -614,9 +614,9 @@ define half @fold_demote_h_d(half %a, double %b) nounwind {
; RV64IFD-NEXT: srli a0, a0, 63
; RV64IFD-NEXT: slli a0, a0, 63
; RV64IFD-NEXT: srli a0, a0, 48
-; RV64IFD-NEXT: lui a2, 1048560
-; RV64IFD-NEXT: or a1, a1, a2
; RV64IFD-NEXT: or a0, a1, a0
+; RV64IFD-NEXT: lui a1, 1048560
+; RV64IFD-NEXT: or a0, a0, a1
; RV64IFD-NEXT: fmv.w.x fa0, a0
; RV64IFD-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/div-pow2.ll b/llvm/test/CodeGen/RISCV/div-pow2.ll
index 254e675b4ed8b..ba621631823ad 100644
--- a/llvm/test/CodeGen/RISCV/div-pow2.ll
+++ b/llvm/test/CodeGen/RISCV/div-pow2.ll
@@ -213,8 +213,8 @@ define i64 @sdiv64_pow2_negative_2(i64 %a) {
; RV32I-NEXT: neg a0, a3
; RV32I-NEXT: snez a2, a3
; RV32I-NEXT: srai a1, a1, 1
+; RV32I-NEXT: add a1, a1, a2
; RV32I-NEXT: neg a1, a1
-; RV32I-NEXT: sub a1, a1, a2
; RV32I-NEXT: ret
;
; RV64I-LABEL: sdiv64_pow2_negative_2:
@@ -269,8 +269,8 @@ define i64 @sdiv64_pow2_negative_2048(i64 %a) {
; RV32I-NEXT: neg a0, a3
; RV32I-NEXT: snez a2, a3
; RV32I-NEXT: srai a1, a1, 11
+; RV32I-NEXT: add a1, a1, a2
; RV32I-NEXT: neg a1, a1
-; RV32I-NEXT: sub a1, a1, a2
; RV32I-NEXT: ret
;
; RV64I-LABEL: sdiv64_pow2_negative_2048:
@@ -326,8 +326,8 @@ define i64 @sdiv64_pow2_negative_4096(i64 %a) {
; RV32I-NEXT: neg a0, a3
; RV32I-NEXT: snez a2, a3
; RV32I-NEXT: srai a1, a1, 12
+; RV32I-NEXT: add a1, a1, a2
; RV32I-NEXT: neg a1, a1
-; RV32I-NEXT: sub a1, a1, a2
; RV32I-NEXT: ret
;
; RV64I-LABEL: sdiv64_pow2_negative_4096:
@@ -383,8 +383,8 @@ define i64 @sdiv64_pow2_negative_65536(i64 %a) {
; RV32I-NEXT: neg a0, a3
; RV32I-NEXT: snez a2, a3
; RV32I-NEXT: srai a1, a1, 16
+; RV32I-NEXT: add a1, a1, a2
; RV32I-NEXT: neg a1, a1
-; RV32I-NEXT: sub a1, a1, a2
; RV32I-NEXT: ret
;
; RV64I-LABEL: sdiv64_pow2_negative_65536:
@@ -437,8 +437,8 @@ define i64 @sdiv64_pow2_negative_8589934592(i64 %a) {
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: srai a0, a0, 1
; RV32I-NEXT: snez a2, a0
+; RV32I-NEXT: add a1, a1, a2
; RV32I-NEXT: neg a1, a1
-; RV32I-NEXT: sub a1, a1, a2
; RV32I-NEXT: neg a0, a0
; RV32I-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/float-intrinsics.ll
index a00d82942cabe..9d3e278f320d7 100644
--- a/llvm/test/CodeGen/RISCV/float-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/float-intrinsics.ll
@@ -1658,12 +1658,12 @@ define i1 @fpclass(float %x) {
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: and a2, a2, a0
; RV32I-NEXT: seqz a3, a1
-; RV32I-NEXT: lui a4, 522240
-; RV32I-NEXT: xor a5, a1, a4
-; RV32I-NEXT: seqz a5, a5
-; RV32I-NEXT: or a3, a3, a5
; RV32I-NEXT: or a2, a3, a2
-; RV32I-NEXT: slt a3, a4, a1
+; RV32I-NEXT: lui a3, 522240
+; RV32I-NEXT: xor a4, a1, a3
+; RV32I-NEXT: seqz a4, a4
+; RV32I-NEXT: or a2, a2, a4
+; RV32I-NEXT: slt a3, a3, a1
; RV32I-NEXT: or a2, a2, a3
; RV32I-NEXT: lui a3, 1046528
; RV32I-NEXT: add a1, a1, a3
@@ -1685,12 +1685,12 @@ define i1 @fpclass(float %x) {
; RV64I-NEXT: slti a1, a1, 0
; RV64I-NEXT: and a2, a2, a1
; RV64I-NEXT: seqz a3, a0
-; RV64I-NEXT: lui a4, 522240
-; RV64I-NEXT: xor a5, a0, a4
-; RV64I-NEXT: seqz a5, a5
-; RV64I-NEXT: or a3, a3, a5
; RV64I-NEXT: or a2, a3, a2
-; RV64I-NEXT: slt a3, a4, a0
+; RV64I-NEXT: lui a3, 522240
+; RV64I-NEXT: xor a4, a0, a3
+; RV64I-NEXT: seqz a4, a4
+; RV64I-NEXT: or a2, a2, a4
+; RV64I-NEXT: slt a3, a3, a0
; RV64I-NEXT: or a2, a2, a3
; RV64I-NEXT: lui a3, 1046528
; RV64I-NEXT: add a0, a0, a3
diff --git a/llvm/test/CodeGen/RISCV/iabs.ll b/llvm/test/CodeGen/RISCV/iabs.ll
index a0c85ab4dca7f..479ac1c54e90f 100644
--- a/llvm/test/CodeGen/RISCV/iabs.ll
+++ b/llvm/test/CodeGen/RISCV/iabs.ll
@@ -225,8 +225,8 @@ define i64 @abs64(i64 %x) {
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: snez a2, a0
; RV32I-NEXT: neg a0, a0
+; RV32I-NEXT: add a1, a1, a2
; RV32I-NEXT: neg a1, a1
-; RV32I-NEXT: sub a1, a1, a2
; RV32I-NEXT: .LBB6_2:
; RV32I-NEXT: ret
;
@@ -236,8 +236,8 @@ define i64 @abs64(i64 %x) {
; RV32ZBB-NEXT: # %bb.1:
; RV32ZBB-NEXT: snez a2, a0
; RV32ZBB-NEXT: neg a0, a0
+; RV32ZBB-NEXT: add a1, a1, a2
; RV32ZBB-NEXT: neg a1, a1
-; RV32ZBB-NEXT: sub a1, a1, a2
; RV32ZBB-NEXT: .LBB6_2:
; RV32ZBB-NEXT: ret
;
@@ -264,8 +264,8 @@ define i64 @select_abs64(i64 %x) {
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: snez a2, a0
; RV32I-NEXT: neg a0, a0
+; RV32I-NEXT: add a1, a1, a2
; RV32I-NEXT: neg a1, a1
-; RV32I-NEXT: sub a1, a1, a2
; RV32I-NEXT: .LBB7_2:
; RV32I-NEXT: ret
;
@@ -275,8 +275,8 @@ define i64 @select_abs64(i64 %x) {
; RV32ZBB-NEXT: # %bb.1:
; RV32ZBB-NEXT: snez a2, a0
; RV32ZBB-NEXT: neg a0, a0
+; RV32ZBB-NEXT: add a1, a1, a2
; RV32ZBB-NEXT: neg a1, a1
-; RV32ZBB-NEXT: sub a1, a1, a2
; RV32ZBB-NEXT: .LBB7_2:
; RV32ZBB-NEXT: ret
;
@@ -314,11 +314,11 @@ define i128 @abs128(i128 %x) {
; RV32I-NEXT: sltu t0, a5, a6
; RV32I-NEXT: snez a1, a1
; RV32I-NEXT: add a1, a2, a1
-; RV32I-NEXT: neg a1, a1
-; RV32I-NEXT: sub a2, a1, t0
+; RV32I-NEXT: add a1, a1, t0
+; RV32I-NEXT: neg a2, a1
; RV32I-NEXT: sub a1, a5, a6
+; RV32I-NEXT: add a4, a4, a7
; RV32I-NEXT: neg a4, a4
-; RV32I-NEXT: sub a4, a4, a7
; RV32I-NEXT: neg a3, a3
; RV32I-NEXT: .LBB8_2:
; RV32I-NEXT: sw a3, 0(a0)
@@ -342,11 +342,11 @@ define i128 @abs128(i128 %x) {
; RV32ZBB-NEXT: sltu t0, a5, a6
; RV32ZBB-NEXT: snez a1, a1
; RV32ZBB-NEXT: add a1, a2, a1
-; RV32ZBB-NEXT: neg a1, a1
-; RV32ZBB-NEXT: sub a2, a1, t0
+; RV32ZBB-NEXT: add a1, a1, t0
+; RV32ZBB-NEXT: neg a2, a1
; RV32ZBB-NEXT: sub a1, a5, a6
+; RV32ZBB-NEXT: add a4, a4, a7
; RV32ZBB-NEXT: neg a4, a4
-; RV32ZBB-NEXT: sub a4, a4, a7
; RV32ZBB-NEXT: neg a3, a3
; RV32ZBB-NEXT: .LBB8_2:
; RV32ZBB-NEXT: sw a3, 0(a0)
@@ -361,8 +361,8 @@ define i128 @abs128(i128 %x) {
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: snez a2, a0
; RV64I-NEXT: neg a0, a0
+; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: neg a1, a1
-; RV64I-NEXT: sub a1, a1, a2
; RV64I-NEXT: .LBB8_2:
; RV64I-NEXT: ret
;
@@ -372,8 +372,8 @@ define i128 @abs128(i128 %x) {
; RV64ZBB-NEXT: # %bb.1:
; RV64ZBB-NEXT: snez a2, a0
; RV64ZBB-NEXT: neg a0, a0
+; RV64ZBB-NEXT: add a1, a1, a2
; RV64ZBB-NEXT: neg a1, a1
-; RV64ZBB-NEXT: sub a1, a1, a2
; RV64ZBB-NEXT: .LBB8_2:
; RV64ZBB-NEXT: ret
%abs = tail call i128 @llvm.abs.i128(i128 %x, i1 true)
@@ -396,11 +396,11 @@ define i128 @select_abs128(i128 %x) {
; RV32I-NEXT: sltu t0, a5, a6
; RV32I-NEXT: snez a1, a1
; RV32I-NEXT: add a1, a2, a1
-; RV32I-NEXT: neg a1, a1
-; RV32I-NEXT: sub a2, a1, t0
+; RV32I-NEXT: add a1, a1, t0
+; RV32I-NEXT: neg a2, a1
; RV32I-NEXT: sub a1, a5, a6
+; RV32I-NEXT: add a4, a4, a7
; RV32I-NEXT: neg a4, a4
-; RV32I-NEXT: sub a4, a4, a7
; RV32I-NEXT: neg a3, a3
; RV32I-NEXT: .LBB9_2:
; RV32I-NEXT: sw a3, 0(a0)
@@ -424,11 +424,11 @@ define i128 @select_abs128(i128 %x) {
; RV32ZBB-NEXT: sltu t0, a5, a6
; RV32ZBB-NEXT: snez a1, a1
; RV32ZBB-NEXT: add a1, a2, a1
-; RV32ZBB-NEXT: neg a1, a1
-; RV32ZBB-NEXT: sub a2, a1, t0
+; RV32ZBB-NEXT: add a1, a1, t0
+; RV32ZBB-NEXT: neg a2, a1
; RV32ZBB-NEXT: sub a1, a5, a6
+; RV32ZBB-NEXT: add a4, a4, a7
; RV32ZBB-NEXT: neg a4, a4
-; RV32ZBB-NEXT: sub a4, a4, a7
; RV32ZBB-NEXT: neg a3, a3
; RV32ZBB-NEXT: .LBB9_2:
; RV32ZBB-NEXT: sw a3, 0(a0)
@@ -443,8 +443,8 @@ define i128 @select_abs128(i128 %x) {
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: snez a2, a0
; RV64I-NEXT: neg a0, a0
+; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: neg a1, a1
-; RV64I-NEXT: sub a1, a1, a2
; RV64I-NEXT: .LBB9_2:
; RV64I-NEXT: ret
;
@@ -454,8 +454,8 @@ define i128 @select_abs128(i128 %x) {
; RV64ZBB-NEXT: # %bb.1:
; RV64ZBB-NEXT: snez a2, a0
; RV64ZBB-NEXT: neg a0, a0
+; RV64ZBB-NEXT: add a1, a1, a2
; RV64ZBB-NEXT: neg a1, a1
-; RV64ZBB-NEXT: sub a1, a1, a2
; RV64ZBB-NEXT: .LBB9_2:
; RV64ZBB-NEXT: ret
%1 = icmp slt i128 %x, 0
diff --git a/llvm/test/CodeGen/RISCV/machine-combiner.mir b/llvm/test/CodeGen/RISCV/machine-combiner.mir
index e110dd9985f63..463f7cd90f7ba 100644
--- a/llvm/test/CodeGen/RISCV/machine-combiner.mir
+++ b/llvm/test/CodeGen/RISCV/machine-combiner.mir
@@ -64,10 +64,11 @@ body: |
; CHECK-NEXT: [[COPY5:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT: [[FLW:%[0-9]+]]:fpr32 = FLW [[COPY5]], 0 :: (load (s32) from %ir.0)
; CHECK-NEXT: [[FLW1:%[0-9]+]]:fpr32 = FLW [[COPY4]], 0 :: (load (s32) from %ir.1)
- ; CHECK-NEXT: [[FMADD_S:%[0-9]+]]:fpr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept FMADD_S [[FLW1]], [[FLW]], [[COPY3]], 7, implicit $frm
- ; CHECK-NEXT: FSW killed [[FMADD_S]], [[COPY1]], 0 :: (store (s32) into %ir.4)
- ; CHECK-NEXT: [[FNMSUB_S:%[0-9]+]]:fpr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept FNMSUB_S [[FLW1]], [[FLW]], [[COPY2]], 7, implicit $frm
- ; CHECK-NEXT: FSW killed [[FNMSUB_S]], [[COPY]], 0 :: (store (s32) into %ir.5)
+ ; CHECK-NEXT: [[FMUL_S:%[0-9]+]]:fpr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept FMUL_S [[FLW1]], [[FLW]], 7, implicit $frm
+ ; CHECK-NEXT: [[FADD_S:%[0-9]+]]:fpr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept FADD_S [[FMUL_S]], [[COPY3]], 7, implicit $frm
+ ; CHECK-NEXT: FSW killed [[FADD_S]], [[COPY1]], 0 :: (store (s32) into %ir.4)
+ ; CHECK-NEXT: [[FSUB_S:%[0-9]+]]:fpr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept FSUB_S [[COPY2]], [[FMUL_S]], 7, implicit $frm
+ ; CHECK-NEXT: FSW killed [[FSUB_S]], [[COPY]], 0 :: (store (s32) into %ir.5)
; CHECK-NEXT: PseudoRET
%5:gpr = COPY $x13
%4:gpr = COPY $x12
diff --git a/llvm/test/CodeGen/RISCV/misched-load-clustering.ll b/llvm/test/CodeGen/RISCV/misched-load-clustering.ll
index db41b26271814..e70941d84836a 100644
--- a/llvm/test/CodeGen/RISCV/misched-load-clustering.ll
+++ b/llvm/test/CodeGen/RISCV/misched-load-clustering.ll
@@ -18,15 +18,15 @@ define i32 @load_clustering_1(ptr nocapture %p) {
; NOCLUSTER: SU(1): %1:gpr = LW %0:gpr, 12
; NOCLUSTER: SU(2): %2:gpr = LW %0:gpr, 8
; NOCLUSTER: SU(4): %4:gpr = LW %0:gpr, 4
-; NOCLUSTER: SU(5): %6:gpr = LW %0:gpr, 16
+; NOCLUSTER: SU(6): %6:gpr = LW %0:gpr, 16
;
; LDCLUSTER: ********** MI Scheduling **********
; LDCLUSTER-LABEL: load_clustering_1:%bb.0
; LDCLUSTER: *** Final schedule for %bb.0 ***
-; LDCLUSTER: SU(4): %4:gpr = LW %0:gpr, 4
; LDCLUSTER: SU(2): %2:gpr = LW %0:gpr, 8
; LDCLUSTER: SU(1): %1:gpr = LW %0:gpr, 12
-; LDCLUSTER: SU(5): %6:gpr = LW %0:gpr, 16
+; LDCLUSTER: SU(4): %4:gpr = LW %0:gpr, 4
+; LDCLUSTER: SU(6): %6:gpr = LW %0:gpr, 16
entry:
%arrayidx0 = getelementptr inbounds i32, ptr %p, i32 3
%val0 = load i32, ptr %arrayidx0
diff --git a/llvm/test/CodeGen/RISCV/mul.ll b/llvm/test/CodeGen/RISCV/mul.ll
index 14f2777fdd06d..e1ca41820c0bc 100644
--- a/llvm/test/CodeGen/RISCV/mul.ll
+++ b/llvm/test/CodeGen/RISCV/mul.ll
@@ -299,8 +299,8 @@ define i32 @mulhs_negative_constant(i32 %a) nounwind {
; RV32I-NEXT: slli a4, a1, 2
; RV32I-NEXT: or a0, a4, a0
; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: add a0, a0, a2
; RV32I-NEXT: snez a1, a3
-; RV32I-NEXT: add a1, a2, a1
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: neg a0, a0
; RV32I-NEXT: ret
@@ -315,8 +315,8 @@ define i32 @mulhs_negative_constant(i32 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: slli a1, a0, 2
+; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: neg a0, a0
-; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: ret
;
@@ -324,8 +324,8 @@ define i32 @mulhs_negative_constant(i32 %a) nounwind {
; RV64IM: # %bb.0:
; RV64IM-NEXT: sext.w a0, a0
; RV64IM-NEXT: slli a1, a0, 2
+; RV64IM-NEXT: add a0, a1, a0
; RV64IM-NEXT: neg a0, a0
-; RV64IM-NEXT: sub a0, a0, a1
; RV64IM-NEXT: srli a0, a0, 32
; RV64IM-NEXT: ret
%1 = sext i32 %a to i64
@@ -814,29 +814,29 @@ define i32 @muli32_m65(i32 %a) nounwind {
; RV32I-LABEL: muli32_m65:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a0, 6
+; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: neg a0, a0
-; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV32IM-LABEL: muli32_m65:
; RV32IM: # %bb.0:
; RV32IM-NEXT: slli a1, a0, 6
+; RV32IM-NEXT: add a0, a1, a0
; RV32IM-NEXT: neg a0, a0
-; RV32IM-NEXT: sub a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_m65:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 6
+; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: negw a0, a0
-; RV64I-NEXT: subw a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_m65:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a1, a0, 6
+; RV64IM-NEXT: add a0, a1, a0
; RV64IM-NEXT: negw a0, a0
-; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
%1 = mul i32 %a, -65
ret i32 %1
@@ -894,8 +894,8 @@ define i64 @muli64_m65(i64 %a) nounwind {
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: add a0, a0, a2
; RV32I-NEXT: snez a1, a3
-; RV32I-NEXT: neg a1, a1
-; RV32I-NEXT: sub a1, a1, a0
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: neg a1, a0
; RV32I-NEXT: neg a0, a3
; RV32I-NEXT: ret
;
@@ -908,22 +908,22 @@ define i64 @muli64_m65(i64 %a) nounwind {
; RV32IM-NEXT: sub a2, a2, a0
; RV32IM-NEXT: sub a1, a2, a1
; RV32IM-NEXT: slli a2, a0, 6
+; RV32IM-NEXT: add a0, a2, a0
; RV32IM-NEXT: neg a0, a0
-; RV32IM-NEXT: sub a0, a0, a2
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli64_m65:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 6
+; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: neg a0, a0
-; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli64_m65:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a1, a0, 6
+; RV64IM-NEXT: add a0, a1, a0
; RV64IM-NEXT: neg a0, a0
-; RV64IM-NEXT: sub a0, a0, a1
; RV64IM-NEXT: ret
%1 = mul i64 %a, -65
ret i64 %1
@@ -1386,17 +1386,17 @@ define i128 @muli128_m3840(i128 %a) nounwind {
; RV32IM-NEXT: mulhu t1, a3, a5
; RV32IM-NEXT: sub a3, t1, a3
; RV32IM-NEXT: add a2, a3, a2
-; RV32IM-NEXT: add a1, a4, a1
-; RV32IM-NEXT: sub a1, t3, a1
-; RV32IM-NEXT: add a1, a1, a2
-; RV32IM-NEXT: add a1, a1, t0
-; RV32IM-NEXT: add a1, a7, a1
-; RV32IM-NEXT: add a1, a1, s0
-; RV32IM-NEXT: mul a2, a4, a5
-; RV32IM-NEXT: sw a2, 0(a0)
+; RV32IM-NEXT: sub a3, t3, a4
+; RV32IM-NEXT: sub a3, a3, a1
+; RV32IM-NEXT: add a2, a3, a2
+; RV32IM-NEXT: add a2, a2, t0
+; RV32IM-NEXT: add a2, a7, a2
+; RV32IM-NEXT: add a2, a2, s0
+; RV32IM-NEXT: mul a1, a4, a5
+; RV32IM-NEXT: sw a1, 0(a0)
; RV32IM-NEXT: sw a6, 4(a0)
; RV32IM-NEXT: sw t6, 8(a0)
-; RV32IM-NEXT: sw a1, 12(a0)
+; RV32IM-NEXT: sw a2, 12(a0)
; RV32IM-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s1, 8(sp) # 4-byte Folded Reload
; RV32IM-NEXT: addi sp, sp, 16
@@ -1497,8 +1497,8 @@ define i128 @muli128_m63(i128 %a) nounwind {
; RV32IM-NEXT: add t1, a7, t1
; RV32IM-NEXT: sub t4, t1, a4
; RV32IM-NEXT: slli t5, a1, 6
-; RV32IM-NEXT: sub t6, a1, a3
-; RV32IM-NEXT: sub t5, t6, t5
+; RV32IM-NEXT: sub t5, a1, t5
+; RV32IM-NEXT: sub t5, t5, a3
; RV32IM-NEXT: add t6, t4, t5
; RV32IM-NEXT: sltu s0, t6, t4
; RV32IM-NEXT: neg s1, a4
@@ -1513,8 +1513,8 @@ define i128 @muli128_m63(i128 %a) nounwind {
; RV32IM-NEXT: mulhu a5, a1, a5
; RV32IM-NEXT: sub a5, a5, a1
; RV32IM-NEXT: add a2, a5, a2
-; RV32IM-NEXT: add a4, a3, a4
-; RV32IM-NEXT: sub a1, t3, a4
+; RV32IM-NEXT: sub a1, t3, a3
+; RV32IM-NEXT: sub a1, a1, a4
; RV32IM-NEXT: add a1, a1, a2
; RV32IM-NEXT: add a1, a1, t0
; RV32IM-NEXT: add a1, a7, a1
diff --git a/llvm/test/CodeGen/RISCV/neg-abs.ll b/llvm/test/CodeGen/RISCV/neg-abs.ll
index 6f301882b452c..b717b71e1ce96 100644
--- a/llvm/test/CodeGen/RISCV/neg-abs.ll
+++ b/llvm/test/CodeGen/RISCV/neg-abs.ll
@@ -204,14 +204,14 @@ define i64 @neg_abs64_multiuse(i64 %x, ptr %y) {
; RV32I-NEXT: bgez a1, .LBB5_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: snez a3, a0
+; RV32I-NEXT: add a1, a1, a3
; RV32I-NEXT: neg a1, a1
-; RV32I-NEXT: sub a1, a1, a3
; RV32I-NEXT: neg a0, a0
; RV32I-NEXT: .LBB5_2:
; RV32I-NEXT: sw a0, 0(a2)
; RV32I-NEXT: snez a3, a0
-; RV32I-NEXT: neg a4, a1
-; RV32I-NEXT: sub a3, a4, a3
+; RV32I-NEXT: add a3, a1, a3
+; RV32I-NEXT: neg a3, a3
; RV32I-NEXT: neg a0, a0
; RV32I-NEXT: sw a1, 4(a2)
; RV32I-NEXT: mv a1, a3
@@ -222,14 +222,14 @@ define i64 @neg_abs64_multiuse(i64 %x, ptr %y) {
; RV32ZBB-NEXT: bgez a1, .LBB5_2
; RV32ZBB-NEXT: # %bb.1:
; RV32ZBB-NEXT: snez a3, a0
+; RV32ZBB-NEXT: add a1, a1, a3
; RV32ZBB-NEXT: neg a1, a1
-; RV32ZBB-NEXT: sub a1, a1, a3
; RV32ZBB-NEXT: neg a0, a0
; RV32ZBB-NEXT: .LBB5_2:
; RV32ZBB-NEXT: sw a0, 0(a2)
; RV32ZBB-NEXT: snez a3, a0
-; RV32ZBB-NEXT: neg a4, a1
-; RV32ZBB-NEXT: sub a3, a4, a3
+; RV32ZBB-NEXT: add a3, a1, a3
+; RV32ZBB-NEXT: neg a3, a3
; RV32ZBB-NEXT: neg a0, a0
; RV32ZBB-NEXT: sw a1, 4(a2)
; RV32ZBB-NEXT: mv a1, a3
diff --git a/llvm/test/CodeGen/RISCV/reduction-formation.ll b/llvm/test/CodeGen/RISCV/reduction-formation.ll
index 6b4dc0cd3699e..3e4219e0a1d64 100644
--- a/llvm/test/CodeGen/RISCV/reduction-formation.ll
+++ b/llvm/test/CodeGen/RISCV/reduction-formation.ll
@@ -8,24 +8,24 @@
define i32 @reduce_sum_4xi32(<4 x i32> %v) {
; RV32-LABEL: reduce_sum_4xi32:
; RV32: # %bb.0:
-; RV32-NEXT: lw a1, 12(a0)
-; RV32-NEXT: lw a2, 4(a0)
-; RV32-NEXT: lw a3, 0(a0)
-; RV32-NEXT: lw a0, 8(a0)
-; RV32-NEXT: add a2, a3, a2
-; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: add a0, a2, a0
+; RV32-NEXT: lw a1, 4(a0)
+; RV32-NEXT: lw a2, 0(a0)
+; RV32-NEXT: lw a3, 8(a0)
+; RV32-NEXT: lw a0, 12(a0)
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: add a1, a1, a3
+; RV32-NEXT: add a0, a1, a0
; RV32-NEXT: ret
;
; RV64-LABEL: reduce_sum_4xi32:
; RV64: # %bb.0:
-; RV64-NEXT: lw a1, 24(a0)
-; RV64-NEXT: lw a2, 8(a0)
-; RV64-NEXT: lw a3, 0(a0)
-; RV64-NEXT: lw a0, 16(a0)
-; RV64-NEXT: add a2, a3, a2
-; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: addw a0, a2, a0
+; RV64-NEXT: lw a1, 8(a0)
+; RV64-NEXT: lw a2, 0(a0)
+; RV64-NEXT: lw a3, 16(a0)
+; RV64-NEXT: lw a0, 24(a0)
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: add a1, a1, a3
+; RV64-NEXT: addw a0, a1, a0
; RV64-NEXT: ret
%e0 = extractelement <4 x i32> %v, i32 0
%e1 = extractelement <4 x i32> %v, i32 1
@@ -40,24 +40,24 @@ define i32 @reduce_sum_4xi32(<4 x i32> %v) {
define i32 @reduce_xor_4xi32(<4 x i32> %v) {
; RV32-LABEL: reduce_xor_4xi32:
; RV32: # %bb.0:
-; RV32-NEXT: lw a1, 12(a0)
-; RV32-NEXT: lw a2, 4(a0)
-; RV32-NEXT: lw a3, 0(a0)
-; RV32-NEXT: lw a0, 8(a0)
-; RV32-NEXT: xor a2, a3, a2
-; RV32-NEXT: xor a0, a0, a1
-; RV32-NEXT: xor a0, a2, a0
+; RV32-NEXT: lw a1, 4(a0)
+; RV32-NEXT: lw a2, 0(a0)
+; RV32-NEXT: lw a3, 8(a0)
+; RV32-NEXT: lw a0, 12(a0)
+; RV32-NEXT: xor a1, a2, a1
+; RV32-NEXT: xor a1, a1, a3
+; RV32-NEXT: xor a0, a1, a0
; RV32-NEXT: ret
;
; RV64-LABEL: reduce_xor_4xi32:
; RV64: # %bb.0:
-; RV64-NEXT: ld a1, 24(a0)
-; RV64-NEXT: ld a2, 8(a0)
-; RV64-NEXT: ld a3, 0(a0)
-; RV64-NEXT: ld a0, 16(a0)
-; RV64-NEXT: xor a2, a3, a2
-; RV64-NEXT: xor a0, a0, a1
-; RV64-NEXT: xor a0, a2, a0
+; RV64-NEXT: ld a1, 8(a0)
+; RV64-NEXT: ld a2, 0(a0)
+; RV64-NEXT: ld a3, 16(a0)
+; RV64-NEXT: ld a0, 24(a0)
+; RV64-NEXT: xor a1, a2, a1
+; RV64-NEXT: xor a1, a1, a3
+; RV64-NEXT: xor a0, a1, a0
; RV64-NEXT: ret
%e0 = extractelement <4 x i32> %v, i32 0
%e1 = extractelement <4 x i32> %v, i32 1
@@ -72,24 +72,24 @@ define i32 @reduce_xor_4xi32(<4 x i32> %v) {
define i32 @reduce_or_4xi32(<4 x i32> %v) {
; RV32-LABEL: reduce_or_4xi32:
; RV32: # %bb.0:
-; RV32-NEXT: lw a1, 12(a0)
-; RV32-NEXT: lw a2, 4(a0)
-; RV32-NEXT: lw a3, 0(a0)
-; RV32-NEXT: lw a0, 8(a0)
-; RV32-NEXT: or a2, a3, a2
-; RV32-NEXT: or a0, a0, a1
-; RV32-NEXT: or a0, a2, a0
+; RV32-NEXT: lw a1, 4(a0)
+; RV32-NEXT: lw a2, 0(a0)
+; RV32-NEXT: lw a3, 8(a0)
+; RV32-NEXT: lw a0, 12(a0)
+; RV32-NEXT: or a1, a2, a1
+; RV32-NEXT: or a1, a1, a3
+; RV32-NEXT: or a0, a1, a0
; RV32-NEXT: ret
;
; RV64-LABEL: reduce_or_4xi32:
; RV64: # %bb.0:
-; RV64-NEXT: ld a1, 24(a0)
-; RV64-NEXT: ld a2, 8(a0)
-; RV64-NEXT: ld a3, 0(a0)
-; RV64-NEXT: ld a0, 16(a0)
-; RV64-NEXT: or a2, a3, a2
-; RV64-NEXT: or a0, a0, a1
-; RV64-NEXT: or a0, a2, a0
+; RV64-NEXT: ld a1, 8(a0)
+; RV64-NEXT: ld a2, 0(a0)
+; RV64-NEXT: ld a3, 16(a0)
+; RV64-NEXT: ld a0, 24(a0)
+; RV64-NEXT: or a1, a2, a1
+; RV64-NEXT: or a1, a1, a3
+; RV64-NEXT: or a0, a1, a0
; RV64-NEXT: ret
%e0 = extractelement <4 x i32> %v, i32 0
%e1 = extractelement <4 x i32> %v, i32 1
diff --git a/llvm/test/CodeGen/RISCV/rv32e.ll b/llvm/test/CodeGen/RISCV/rv32e.ll
index ff73dd216da22..c68a465b96420 100644
--- a/llvm/test/CodeGen/RISCV/rv32e.ll
+++ b/llvm/test/CodeGen/RISCV/rv32e.ll
@@ -9,10 +9,10 @@ define i32 @exhausted(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g) {
; CHECK: # %bb.0:
; CHECK-NEXT: lw t0, 0(sp)
; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: add a2, a3, a2
; CHECK-NEXT: add a0, a2, a0
-; CHECK-NEXT: add a4, a5, a4
+; CHECK-NEXT: add a0, a3, a0
; CHECK-NEXT: add a0, a4, a0
+; CHECK-NEXT: add a0, a5, a0
; CHECK-NEXT: add a0, t0, a0
; CHECK-NEXT: ret
%1 = add i32 %a, %b
diff --git a/llvm/test/CodeGen/RISCV/rv32zba.ll b/llvm/test/CodeGen/RISCV/rv32zba.ll
index 89273ef0e50b5..29dc593825137 100644
--- a/llvm/test/CodeGen/RISCV/rv32zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zba.ll
@@ -673,8 +673,8 @@ define i32 @mul_neg3(i32 %a) {
; RV32I-LABEL: mul_neg3:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a0, 1
+; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: neg a0, a0
-; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV32ZBA-LABEL: mul_neg3:
@@ -700,8 +700,8 @@ define i32 @mul_neg5(i32 %a) {
; RV32I-LABEL: mul_neg5:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a0, 2
+; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: neg a0, a0
-; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV32ZBA-LABEL: mul_neg5:
diff --git a/llvm/test/CodeGen/RISCV/rv32zbb.ll b/llvm/test/CodeGen/RISCV/rv32zbb.ll
index cb9fc6c16333e..70693fe7c2285 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbb.ll
@@ -1253,8 +1253,8 @@ define i64 @abs_i64(i64 %x) {
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: snez a2, a0
; CHECK-NEXT: neg a0, a0
+; CHECK-NEXT: add a1, a1, a2
; CHECK-NEXT: neg a1, a1
-; CHECK-NEXT: sub a1, a1, a2
; CHECK-NEXT: .LBB37_2:
; CHECK-NEXT: ret
%abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true)
diff --git a/llvm/test/CodeGen/RISCV/rv64e.ll b/llvm/test/CodeGen/RISCV/rv64e.ll
index 093d503750abc..22ca713888895 100644
--- a/llvm/test/CodeGen/RISCV/rv64e.ll
+++ b/llvm/test/CodeGen/RISCV/rv64e.ll
@@ -9,10 +9,10 @@ define i64 @exhausted(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, i64 %g) {
; CHECK: # %bb.0:
; CHECK-NEXT: ld t0, 0(sp)
; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: add a2, a3, a2
; CHECK-NEXT: add a0, a2, a0
-; CHECK-NEXT: add a4, a5, a4
+; CHECK-NEXT: add a0, a3, a0
; CHECK-NEXT: add a0, a4, a0
+; CHECK-NEXT: add a0, a5, a0
; CHECK-NEXT: add a0, t0, a0
; CHECK-NEXT: ret
%1 = add i64 %a, %b
diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll
index 20a0484464018..d7c1b8532779b 100644
--- a/llvm/test/CodeGen/RISCV/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -2248,8 +2248,8 @@ define i8 @array_index_sh1_sh0(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh1_sh0:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 1
-; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: lbu a0, 0(a0)
; RV64I-NEXT: ret
;
@@ -2331,8 +2331,8 @@ define i8 @array_index_sh2_sh0(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh2_sh0:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 2
-; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: lbu a0, 0(a0)
; RV64I-NEXT: ret
;
@@ -2414,8 +2414,8 @@ define i8 @array_index_sh3_sh0(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh3_sh0:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 3
-; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: lbu a0, 0(a0)
; RV64I-NEXT: ret
;
@@ -2500,17 +2500,18 @@ define i64 @array_index_lshr_sh3_sh3(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I: # %bb.0:
; RV64I-NEXT: srli a1, a1, 58
; RV64I-NEXT: slli a1, a1, 6
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: slli a2, a2, 3
; RV64I-NEXT: add a0, a0, a2
-; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ld a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64ZBA-LABEL: array_index_lshr_sh3_sh3:
; RV64ZBA: # %bb.0:
; RV64ZBA-NEXT: srli a1, a1, 58
-; RV64ZBA-NEXT: sh3add a1, a1, a2
-; RV64ZBA-NEXT: sh3add a0, a1, a0
+; RV64ZBA-NEXT: slli a1, a1, 6
+; RV64ZBA-NEXT: add a0, a0, a1
+; RV64ZBA-NEXT: sh3add a0, a2, a0
; RV64ZBA-NEXT: ld a0, 0(a0)
; RV64ZBA-NEXT: ret
%shr = lshr i64 %idx1, 58
@@ -2523,8 +2524,8 @@ define i8 @array_index_sh4_sh0(ptr %p, i64 %idx1, i64 %idx2) {
; CHECK-LABEL: array_index_sh4_sh0:
; CHECK: # %bb.0:
; CHECK-NEXT: slli a1, a1, 4
-; CHECK-NEXT: add a0, a0, a2
; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: add a0, a0, a2
; CHECK-NEXT: lbu a0, 0(a0)
; CHECK-NEXT: ret
%a = getelementptr inbounds [16 x i8], ptr %p, i64 %idx1, i64 %idx2
@@ -2603,15 +2604,16 @@ define ptr @test_gep_gep_dont_crash(ptr %p, i64 %a1, i64 %a2) {
; RV64I: # %bb.0:
; RV64I-NEXT: srliw a2, a2, 6
; RV64I-NEXT: slli a2, a2, 3
+; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: slli a1, a1, 3
; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: ret
;
; RV64ZBA-LABEL: test_gep_gep_dont_crash:
; RV64ZBA: # %bb.0:
; RV64ZBA-NEXT: srliw a2, a2, 6
-; RV64ZBA-NEXT: add a1, a2, a1
+; RV64ZBA-NEXT: slli a2, a2, 3
+; RV64ZBA-NEXT: add a0, a0, a2
; RV64ZBA-NEXT: sh3add a0, a1, a0
; RV64ZBA-NEXT: ret
%lshr = lshr i64 %a2, 6
@@ -2666,8 +2668,8 @@ define i64 @mul_neg3(i64 %a) {
; RV64I-LABEL: mul_neg3:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 1
+; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: neg a0, a0
-; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64ZBA-LABEL: mul_neg3:
@@ -2693,8 +2695,8 @@ define i64 @mul_neg5(i64 %a) {
; RV64I-LABEL: mul_neg5:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 2
+; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: neg a0, a0
-; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64ZBA-LABEL: mul_neg5:
diff --git a/llvm/test/CodeGen/RISCV/rvv/compressstore.ll b/llvm/test/CodeGen/RISCV/rvv/compressstore.ll
index 52811133c53f3..84fff0a3cf372 100644
--- a/llvm/test/CodeGen/RISCV/rvv/compressstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/compressstore.ll
@@ -234,7 +234,7 @@ define void @test_compresstore_v256i8(ptr %p, <256 x i1> %mask, <256 x i8> %data
; RV64-NEXT: vcpop.m a2, v8
; RV64-NEXT: cpop a3, a3
; RV64-NEXT: cpop a1, a1
-; RV64-NEXT: add a0, a0, a3
+; RV64-NEXT: add a1, a3, a1
; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; RV64-NEXT: vse8.v v16, (a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
index dbc65620b7f24..ca8404930153b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
@@ -141,9 +141,9 @@ define <3 x i15> @fp2si_v3f32_v3i15(<3 x float> %x) {
; ZVFH32-NEXT: vmv.x.s a4, v8
; ZVFH32-NEXT: and a3, a4, a3
; ZVFH32-NEXT: slli a3, a3, 15
+; ZVFH32-NEXT: or a2, a2, a3
; ZVFH32-NEXT: slli a1, a1, 30
; ZVFH32-NEXT: or a1, a2, a1
-; ZVFH32-NEXT: or a1, a1, a3
; ZVFH32-NEXT: sw a1, 0(a0)
; ZVFH32-NEXT: ret
;
@@ -159,10 +159,10 @@ define <3 x i15> @fp2si_v3f32_v3i15(<3 x float> %x) {
; ZVFH64-NEXT: vmv.x.s a3, v8
; ZVFH64-NEXT: and a2, a3, a2
; ZVFH64-NEXT: slli a2, a2, 15
+; ZVFH64-NEXT: or a1, a1, a2
; ZVFH64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFH64-NEXT: vmv.x.s a3, v8
-; ZVFH64-NEXT: slli a3, a3, 30
-; ZVFH64-NEXT: or a1, a1, a3
+; ZVFH64-NEXT: vmv.x.s a2, v8
+; ZVFH64-NEXT: slli a2, a2, 30
; ZVFH64-NEXT: or a1, a1, a2
; ZVFH64-NEXT: sw a1, 0(a0)
; ZVFH64-NEXT: slli a1, a1, 19
@@ -187,9 +187,9 @@ define <3 x i15> @fp2si_v3f32_v3i15(<3 x float> %x) {
; ZVFHMIN32-NEXT: vmv.x.s a4, v8
; ZVFHMIN32-NEXT: and a3, a4, a3
; ZVFHMIN32-NEXT: slli a3, a3, 15
+; ZVFHMIN32-NEXT: or a2, a2, a3
; ZVFHMIN32-NEXT: slli a1, a1, 30
; ZVFHMIN32-NEXT: or a1, a2, a1
-; ZVFHMIN32-NEXT: or a1, a1, a3
; ZVFHMIN32-NEXT: sw a1, 0(a0)
; ZVFHMIN32-NEXT: ret
;
@@ -205,10 +205,10 @@ define <3 x i15> @fp2si_v3f32_v3i15(<3 x float> %x) {
; ZVFHMIN64-NEXT: vmv.x.s a3, v8
; ZVFHMIN64-NEXT: and a2, a3, a2
; ZVFHMIN64-NEXT: slli a2, a2, 15
+; ZVFHMIN64-NEXT: or a1, a1, a2
; ZVFHMIN64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMIN64-NEXT: vmv.x.s a3, v8
-; ZVFHMIN64-NEXT: slli a3, a3, 30
-; ZVFHMIN64-NEXT: or a1, a1, a3
+; ZVFHMIN64-NEXT: vmv.x.s a2, v8
+; ZVFHMIN64-NEXT: slli a2, a2, 30
; ZVFHMIN64-NEXT: or a1, a1, a2
; ZVFHMIN64-NEXT: sw a1, 0(a0)
; ZVFHMIN64-NEXT: slli a1, a1, 19
@@ -238,9 +238,9 @@ define <3 x i15> @fp2ui_v3f32_v3i15(<3 x float> %x) {
; ZVFH32-NEXT: vmv.x.s a4, v8
; ZVFH32-NEXT: and a3, a4, a3
; ZVFH32-NEXT: slli a3, a3, 15
+; ZVFH32-NEXT: or a2, a2, a3
; ZVFH32-NEXT: slli a1, a1, 30
; ZVFH32-NEXT: or a1, a2, a1
-; ZVFH32-NEXT: or a1, a1, a3
; ZVFH32-NEXT: sw a1, 0(a0)
; ZVFH32-NEXT: ret
;
@@ -256,10 +256,10 @@ define <3 x i15> @fp2ui_v3f32_v3i15(<3 x float> %x) {
; ZVFH64-NEXT: vmv.x.s a3, v8
; ZVFH64-NEXT: and a2, a3, a2
; ZVFH64-NEXT: slli a2, a2, 15
+; ZVFH64-NEXT: or a1, a1, a2
; ZVFH64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFH64-NEXT: vmv.x.s a3, v8
-; ZVFH64-NEXT: slli a3, a3, 30
-; ZVFH64-NEXT: or a1, a1, a3
+; ZVFH64-NEXT: vmv.x.s a2, v8
+; ZVFH64-NEXT: slli a2, a2, 30
; ZVFH64-NEXT: or a1, a1, a2
; ZVFH64-NEXT: sw a1, 0(a0)
; ZVFH64-NEXT: slli a1, a1, 19
@@ -284,9 +284,9 @@ define <3 x i15> @fp2ui_v3f32_v3i15(<3 x float> %x) {
; ZVFHMIN32-NEXT: vmv.x.s a4, v8
; ZVFHMIN32-NEXT: and a3, a4, a3
; ZVFHMIN32-NEXT: slli a3, a3, 15
+; ZVFHMIN32-NEXT: or a2, a2, a3
; ZVFHMIN32-NEXT: slli a1, a1, 30
; ZVFHMIN32-NEXT: or a1, a2, a1
-; ZVFHMIN32-NEXT: or a1, a1, a3
; ZVFHMIN32-NEXT: sw a1, 0(a0)
; ZVFHMIN32-NEXT: ret
;
@@ -302,10 +302,10 @@ define <3 x i15> @fp2ui_v3f32_v3i15(<3 x float> %x) {
; ZVFHMIN64-NEXT: vmv.x.s a3, v8
; ZVFHMIN64-NEXT: and a2, a3, a2
; ZVFHMIN64-NEXT: slli a2, a2, 15
+; ZVFHMIN64-NEXT: or a1, a1, a2
; ZVFHMIN64-NEXT: vslidedown.vi v8, v9, 2
-; ZVFHMIN64-NEXT: vmv.x.s a3, v8
-; ZVFHMIN64-NEXT: slli a3, a3, 30
-; ZVFHMIN64-NEXT: or a1, a1, a3
+; ZVFHMIN64-NEXT: vmv.x.s a2, v8
+; ZVFHMIN64-NEXT: slli a2, a2, 30
; ZVFHMIN64-NEXT: or a1, a1, a2
; ZVFHMIN64-NEXT: sw a1, 0(a0)
; ZVFHMIN64-NEXT: slli a1, a1, 19
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-explodevector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-explodevector.ll
index e0c676788dccc..f6f3097fb1e6a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-explodevector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-explodevector.ll
@@ -28,8 +28,8 @@ define i8 @explode_4xi8(<4 x i8> %v) {
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vredxor.vs v8, v8, v9
; CHECK-NEXT: vmv.x.s a2, v8
-; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: add a0, a2, a0
+; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: ret
%e0 = extractelement <4 x i8> %v, i32 0
%e1 = extractelement <4 x i8> %v, i32 1
@@ -62,11 +62,11 @@ define i8 @explode_8xi8(<8 x i8> %v) {
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vredxor.vs v8, v8, v9
; CHECK-NEXT: vmv.x.s a6, v8
-; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: add a0, a6, a0
-; CHECK-NEXT: add a2, a2, a3
-; CHECK-NEXT: add a2, a2, a4
+; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: add a0, a0, a2
+; CHECK-NEXT: add a0, a0, a3
+; CHECK-NEXT: add a0, a0, a4
; CHECK-NEXT: add a0, a0, a5
; CHECK-NEXT: ret
%e0 = extractelement <8 x i8> %v, i32 0
@@ -123,20 +123,20 @@ define i8 @explode_16xi8(<16 x i8> %v) {
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vredxor.vs v8, v8, v9
; CHECK-NEXT: vmv.x.s t6, v8
-; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: add a0, t6, a0
-; CHECK-NEXT: add a2, a2, a3
-; CHECK-NEXT: add a2, a2, a4
+; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: add a0, a0, a2
-; CHECK-NEXT: add a5, a5, a6
-; CHECK-NEXT: add a5, a5, a7
-; CHECK-NEXT: add a5, a5, t0
+; CHECK-NEXT: add a0, a0, a3
+; CHECK-NEXT: add a0, a0, a4
; CHECK-NEXT: add a0, a0, a5
-; CHECK-NEXT: add t1, t1, t2
-; CHECK-NEXT: add t1, t1, t3
-; CHECK-NEXT: add t1, t1, t4
-; CHECK-NEXT: add t1, t1, t5
+; CHECK-NEXT: add a0, a0, a6
+; CHECK-NEXT: add a0, a0, a7
+; CHECK-NEXT: add a0, a0, t0
; CHECK-NEXT: add a0, a0, t1
+; CHECK-NEXT: add a0, a0, t2
+; CHECK-NEXT: add a0, a0, t3
+; CHECK-NEXT: add a0, a0, t4
+; CHECK-NEXT: add a0, a0, t5
; CHECK-NEXT: ret
%e0 = extractelement <16 x i8> %v, i32 0
%e1 = extractelement <16 x i8> %v, i32 1
@@ -198,8 +198,8 @@ define i16 @explode_4xi16(<4 x i16> %v) {
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vredxor.vs v8, v8, v9
; CHECK-NEXT: vmv.x.s a2, v8
-; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: add a0, a2, a0
+; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: ret
%e0 = extractelement <4 x i16> %v, i32 0
%e1 = extractelement <4 x i16> %v, i32 1
@@ -232,11 +232,11 @@ define i16 @explode_8xi16(<8 x i16> %v) {
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vredxor.vs v8, v8, v9
; CHECK-NEXT: vmv.x.s a6, v8
-; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: add a0, a6, a0
-; CHECK-NEXT: add a2, a2, a3
-; CHECK-NEXT: add a2, a2, a4
+; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: add a0, a0, a2
+; CHECK-NEXT: add a0, a0, a3
+; CHECK-NEXT: add a0, a0, a4
; CHECK-NEXT: add a0, a0, a5
; CHECK-NEXT: ret
%e0 = extractelement <8 x i16> %v, i32 0
@@ -294,20 +294,20 @@ define i16 @explode_16xi16(<16 x i16> %v) {
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vredxor.vs v8, v8, v9
; CHECK-NEXT: vmv.x.s t6, v8
-; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: add a0, t6, a0
-; CHECK-NEXT: add a2, a2, a3
-; CHECK-NEXT: add a2, a2, a4
+; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: add a0, a0, a2
-; CHECK-NEXT: add a5, a5, a6
-; CHECK-NEXT: add a5, a5, a7
-; CHECK-NEXT: add a5, a5, t0
+; CHECK-NEXT: add a0, a0, a3
+; CHECK-NEXT: add a0, a0, a4
; CHECK-NEXT: add a0, a0, a5
-; CHECK-NEXT: add t1, t1, t2
-; CHECK-NEXT: add t1, t1, t3
-; CHECK-NEXT: add t1, t1, t4
-; CHECK-NEXT: add t1, t1, t5
+; CHECK-NEXT: add a0, a0, a6
+; CHECK-NEXT: add a0, a0, a7
+; CHECK-NEXT: add a0, a0, t0
; CHECK-NEXT: add a0, a0, t1
+; CHECK-NEXT: add a0, a0, t2
+; CHECK-NEXT: add a0, a0, t3
+; CHECK-NEXT: add a0, a0, t4
+; CHECK-NEXT: add a0, a0, t5
; CHECK-NEXT: ret
%e0 = extractelement <16 x i16> %v, i32 0
%e1 = extractelement <16 x i16> %v, i32 1
@@ -369,8 +369,8 @@ define i32 @explode_4xi32(<4 x i32> %v) {
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT: vredxor.vs v8, v8, v9
; RV32-NEXT: vmv.x.s a2, v8
-; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: add a0, a2, a0
+; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: ret
;
; RV64-LABEL: explode_4xi32:
@@ -384,8 +384,8 @@ define i32 @explode_4xi32(<4 x i32> %v) {
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT: vredxor.vs v8, v8, v9
; RV64-NEXT: vmv.x.s a2, v8
-; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: addw a0, a2, a0
+; RV64-NEXT: add a0, a2, a0
+; RV64-NEXT: addw a0, a0, a1
; RV64-NEXT: ret
%e0 = extractelement <4 x i32> %v, i32 0
%e1 = extractelement <4 x i32> %v, i32 1
@@ -419,11 +419,11 @@ define i32 @explode_8xi32(<8 x i32> %v) {
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT: vredxor.vs v8, v8, v9
; RV32-NEXT: vmv.x.s a6, v8
-; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: add a0, a6, a0
-; RV32-NEXT: add a2, a2, a3
-; RV32-NEXT: add a2, a2, a4
+; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: add a0, a0, a2
+; RV32-NEXT: add a0, a0, a3
+; RV32-NEXT: add a0, a0, a4
; RV32-NEXT: add a0, a0, a5
; RV32-NEXT: ret
;
@@ -447,11 +447,11 @@ define i32 @explode_8xi32(<8 x i32> %v) {
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT: vredxor.vs v8, v8, v9
; RV64-NEXT: vmv.x.s a6, v8
-; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: add a0, a6, a0
-; RV64-NEXT: add a2, a2, a3
-; RV64-NEXT: add a2, a2, a4
+; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: add a0, a0, a2
+; RV64-NEXT: add a0, a0, a3
+; RV64-NEXT: add a0, a0, a4
; RV64-NEXT: addw a0, a0, a5
; RV64-NEXT: ret
%e0 = extractelement <8 x i32> %v, i32 0
@@ -513,20 +513,20 @@ define i32 @explode_16xi32(<16 x i32> %v) {
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT: vredxor.vs v8, v8, v9
; RV32-NEXT: vmv.x.s t6, v8
-; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: add a0, t6, a0
-; RV32-NEXT: add a2, a2, a3
-; RV32-NEXT: add a2, a2, a4
+; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: add a0, a0, a2
-; RV32-NEXT: add a5, a5, a6
+; RV32-NEXT: add a0, a0, a3
+; RV32-NEXT: add a0, a0, a4
; RV32-NEXT: add a0, a0, a5
-; RV32-NEXT: add a7, a7, t0
-; RV32-NEXT: add a7, a7, t1
+; RV32-NEXT: add a0, a0, a6
; RV32-NEXT: add a0, a0, a7
-; RV32-NEXT: add t2, t2, t3
-; RV32-NEXT: add t2, t2, t4
-; RV32-NEXT: add t2, t2, t5
+; RV32-NEXT: add a0, a0, t0
+; RV32-NEXT: add a0, a0, t1
; RV32-NEXT: add a0, a0, t2
+; RV32-NEXT: add a0, a0, t3
+; RV32-NEXT: add a0, a0, t4
+; RV32-NEXT: add a0, a0, t5
; RV32-NEXT: addi sp, s0, -128
; RV32-NEXT: lw ra, 124(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 120(sp) # 4-byte Folded Reload
@@ -573,20 +573,20 @@ define i32 @explode_16xi32(<16 x i32> %v) {
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT: vredxor.vs v8, v8, v9
; RV64-NEXT: vmv.x.s t6, v8
-; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: add a0, t6, a0
-; RV64-NEXT: add a2, a2, a3
-; RV64-NEXT: add a2, a2, a4
+; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: add a0, a0, a2
-; RV64-NEXT: add a5, a5, a6
+; RV64-NEXT: add a0, a0, a3
+; RV64-NEXT: add a0, a0, a4
; RV64-NEXT: add a0, a0, a5
-; RV64-NEXT: add a7, a7, t0
-; RV64-NEXT: add a7, a7, t1
+; RV64-NEXT: add a0, a0, a6
; RV64-NEXT: add a0, a0, a7
-; RV64-NEXT: add t2, t2, t3
-; RV64-NEXT: add t2, t2, t4
-; RV64-NEXT: add t2, t2, t5
-; RV64-NEXT: addw a0, a0, t2
+; RV64-NEXT: add a0, a0, t0
+; RV64-NEXT: add a0, a0, t1
+; RV64-NEXT: add a0, a0, t2
+; RV64-NEXT: add a0, a0, t3
+; RV64-NEXT: add a0, a0, t4
+; RV64-NEXT: addw a0, a0, t5
; RV64-NEXT: addi sp, s0, -128
; RV64-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 112(sp) # 8-byte Folded Reload
@@ -693,8 +693,8 @@ define i64 @explode_4xi64(<4 x i64> %v) {
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT: vredxor.vs v8, v8, v9
; RV64-NEXT: vmv.x.s a2, v8
-; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: add a0, a2, a0
+; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: ret
%e0 = extractelement <4 x i64> %v, i32 0
%e1 = extractelement <4 x i64> %v, i32 1
@@ -750,20 +750,20 @@ define i64 @explode_8xi64(<8 x i64> %v) {
; RV32-NEXT: add a0, a0, a3
; RV32-NEXT: add a4, a2, a4
; RV32-NEXT: sltu a1, a4, a2
-; RV32-NEXT: add a1, a1, a5
; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, a0, a5
; RV32-NEXT: add a6, a4, a6
; RV32-NEXT: sltu a1, a6, a4
-; RV32-NEXT: add a1, a1, a7
; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, a0, a7
; RV32-NEXT: add t0, a6, t0
; RV32-NEXT: sltu a1, t0, a6
-; RV32-NEXT: add a1, a1, t1
; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, a0, t1
; RV32-NEXT: add t2, t0, t2
; RV32-NEXT: sltu a1, t2, t0
-; RV32-NEXT: add a1, a1, t3
-; RV32-NEXT: add a1, a0, a1
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a1, a0, t3
; RV32-NEXT: add a0, t2, t4
; RV32-NEXT: sltu a2, a0, t2
; RV32-NEXT: add a1, a1, a2
@@ -796,11 +796,11 @@ define i64 @explode_8xi64(<8 x i64> %v) {
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT: vredxor.vs v8, v8, v9
; RV64-NEXT: vmv.x.s a6, v8
-; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: add a0, a6, a0
+; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: add a0, a0, a2
-; RV64-NEXT: add a3, a3, a4
; RV64-NEXT: add a0, a0, a3
+; RV64-NEXT: add a0, a0, a4
; RV64-NEXT: add a0, a0, a5
; RV64-NEXT: addi sp, s0, -128
; RV64-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
@@ -902,8 +902,8 @@ define i64 @explode_16xi64(<16 x i64> %v) {
; RV32-NEXT: vmv.x.s s7, v16
; RV32-NEXT: vslidedown.vi v16, v8, 13
; RV32-NEXT: vsrl.vx v24, v16, a0
-; RV32-NEXT: vmv.x.s s9, v24
-; RV32-NEXT: vmv.x.s s8, v16
+; RV32-NEXT: vmv.x.s s8, v24
+; RV32-NEXT: vmv.x.s s9, v16
; RV32-NEXT: vslidedown.vi v16, v8, 14
; RV32-NEXT: vsrl.vx v24, v16, a0
; RV32-NEXT: vmv.s.x v17, zero
@@ -924,59 +924,59 @@ define i64 @explode_16xi64(<16 x i64> %v) {
; RV32-NEXT: add a0, a0, a3
; RV32-NEXT: add a4, a2, a4
; RV32-NEXT: sltu a1, a4, a2
-; RV32-NEXT: add a1, a1, a5
; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, a0, a5
; RV32-NEXT: add a6, a4, a6
; RV32-NEXT: sltu a1, a6, a4
-; RV32-NEXT: add a1, a1, a7
; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, a0, a7
; RV32-NEXT: add t0, a6, t0
; RV32-NEXT: sltu a1, t0, a6
-; RV32-NEXT: add a1, a1, t1
; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, a0, t1
; RV32-NEXT: add t2, t0, t2
; RV32-NEXT: sltu a1, t2, t0
-; RV32-NEXT: add a1, a1, t3
; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, a0, t3
; RV32-NEXT: add t4, t2, t4
; RV32-NEXT: sltu a1, t4, t2
-; RV32-NEXT: add a1, a1, t5
; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, a0, t5
; RV32-NEXT: add t6, t4, t6
; RV32-NEXT: sltu a1, t6, t4
-; RV32-NEXT: add a1, a1, s0
; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, a0, s0
; RV32-NEXT: add s1, t6, s1
; RV32-NEXT: sltu a1, s1, t6
-; RV32-NEXT: add a1, a1, s2
; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, a0, s2
; RV32-NEXT: add s3, s1, s3
; RV32-NEXT: sltu a1, s3, s1
-; RV32-NEXT: add a1, a1, s4
; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, a0, s4
; RV32-NEXT: add s5, s3, s5
; RV32-NEXT: sltu a1, s5, s3
-; RV32-NEXT: add a1, a1, s6
; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, a0, s6
; RV32-NEXT: add s7, s5, s7
; RV32-NEXT: sltu a1, s7, s5
-; RV32-NEXT: add a1, a1, s9
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, a0, s8
+; RV32-NEXT: add s9, s7, s9
+; RV32-NEXT: sltu a1, s9, s7
; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: vmv.x.s a1, v24
-; RV32-NEXT: add s8, s7, s8
-; RV32-NEXT: sltu a2, s8, s7
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: vmv.x.s a2, v16
; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: vmv.x.s a1, v0
-; RV32-NEXT: add a2, s8, a2
-; RV32-NEXT: sltu a3, a2, s8
-; RV32-NEXT: add a1, a3, a1
-; RV32-NEXT: add a1, a0, a1
+; RV32-NEXT: vmv.x.s a1, v16
+; RV32-NEXT: add a1, s9, a1
+; RV32-NEXT: sltu a2, a1, s9
+; RV32-NEXT: add a0, a0, a2
+; RV32-NEXT: vmv.x.s a2, v0
+; RV32-NEXT: add a2, a0, a2
; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: add a0, a2, a0
-; RV32-NEXT: sltu a2, a0, a2
-; RV32-NEXT: add a1, a1, a2
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: sltu a1, a0, a1
+; RV32-NEXT: add a1, a2, a1
; RV32-NEXT: lw s0, 44(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s1, 40(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s2, 36(sp) # 4-byte Folded Reload
@@ -1027,20 +1027,20 @@ define i64 @explode_16xi64(<16 x i64> %v) {
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT: vredxor.vs v8, v8, v9
; RV64-NEXT: vmv.x.s t6, v8
-; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: add a0, t6, a0
+; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: add a0, a0, a2
-; RV64-NEXT: add a3, a3, a4
; RV64-NEXT: add a0, a0, a3
-; RV64-NEXT: add a5, a5, a6
-; RV64-NEXT: add a5, a5, a7
+; RV64-NEXT: add a0, a0, a4
; RV64-NEXT: add a0, a0, a5
-; RV64-NEXT: add t0, t0, t1
-; RV64-NEXT: add t0, t0, t2
-; RV64-NEXT: add t0, t0, t3
+; RV64-NEXT: add a0, a0, a6
+; RV64-NEXT: add a0, a0, a7
; RV64-NEXT: add a0, a0, t0
-; RV64-NEXT: add t4, t4, t5
+; RV64-NEXT: add a0, a0, t1
+; RV64-NEXT: add a0, a0, t2
+; RV64-NEXT: add a0, a0, t3
; RV64-NEXT: add a0, a0, t4
+; RV64-NEXT: add a0, a0, t5
; RV64-NEXT: addi sp, s0, -256
; RV64-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
@@ -1113,20 +1113,20 @@ define i32 @explode_16xi32_exact_vlen(<16 x i32> %v) vscale_range(2, 2) {
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT: vredxor.vs v8, v8, v9
; RV32-NEXT: vmv.x.s t6, v8
-; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: add a0, t6, a0
-; RV32-NEXT: add a2, a2, a3
-; RV32-NEXT: add a2, a2, a4
+; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: add a0, a0, a2
-; RV32-NEXT: add a5, a5, a6
-; RV32-NEXT: add a5, a5, a7
-; RV32-NEXT: add a5, a5, t0
+; RV32-NEXT: add a0, a0, a3
+; RV32-NEXT: add a0, a0, a4
; RV32-NEXT: add a0, a0, a5
-; RV32-NEXT: add t1, t1, t2
-; RV32-NEXT: add t1, t1, t3
-; RV32-NEXT: add t1, t1, t4
-; RV32-NEXT: add t1, t1, t5
+; RV32-NEXT: add a0, a0, a6
+; RV32-NEXT: add a0, a0, a7
+; RV32-NEXT: add a0, a0, t0
; RV32-NEXT: add a0, a0, t1
+; RV32-NEXT: add a0, a0, t2
+; RV32-NEXT: add a0, a0, t3
+; RV32-NEXT: add a0, a0, t4
+; RV32-NEXT: add a0, a0, t5
; RV32-NEXT: ret
;
; RV64-LABEL: explode_16xi32_exact_vlen:
@@ -1161,20 +1161,20 @@ define i32 @explode_16xi32_exact_vlen(<16 x i32> %v) vscale_range(2, 2) {
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT: vredxor.vs v8, v8, v9
; RV64-NEXT: vmv.x.s t6, v8
-; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: add a0, t6, a0
-; RV64-NEXT: add a2, a2, a3
-; RV64-NEXT: add a2, a2, a4
+; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: add a0, a0, a2
-; RV64-NEXT: add a5, a5, a6
-; RV64-NEXT: add a5, a5, a7
-; RV64-NEXT: add a5, a5, t0
+; RV64-NEXT: add a0, a0, a3
+; RV64-NEXT: add a0, a0, a4
; RV64-NEXT: add a0, a0, a5
-; RV64-NEXT: add t1, t1, t2
-; RV64-NEXT: add t1, t1, t3
-; RV64-NEXT: add t1, t1, t4
-; RV64-NEXT: add t1, t1, t5
-; RV64-NEXT: addw a0, a0, t1
+; RV64-NEXT: add a0, a0, a6
+; RV64-NEXT: add a0, a0, a7
+; RV64-NEXT: add a0, a0, t0
+; RV64-NEXT: add a0, a0, t1
+; RV64-NEXT: add a0, a0, t2
+; RV64-NEXT: add a0, a0, t3
+; RV64-NEXT: add a0, a0, t4
+; RV64-NEXT: addw a0, a0, t5
; RV64-NEXT: ret
%e0 = extractelement <16 x i32> %v, i32 0
%e1 = extractelement <16 x i32> %v, i32 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll
index c0bd49cc9c5cb..0767a0195951e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll
@@ -482,8 +482,8 @@ define i32 @reduce_sum_4xi32_reduce_order(<4 x i32> %v) {
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vmv.x.s a3, v8
; RV32-NEXT: add a1, a1, a2
-; RV32-NEXT: add a0, a0, a3
; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, a0, a3
; RV32-NEXT: ret
;
; RV64-LABEL: reduce_sum_4xi32_reduce_order:
@@ -497,8 +497,8 @@ define i32 @reduce_sum_4xi32_reduce_order(<4 x i32> %v) {
; RV64-NEXT: vslidedown.vi v8, v8, 3
; RV64-NEXT: vmv.x.s a3, v8
; RV64-NEXT: add a1, a1, a2
-; RV64-NEXT: add a0, a0, a3
-; RV64-NEXT: addw a0, a0, a1
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: addw a0, a0, a3
; RV64-NEXT: ret
%e0 = extractelement <4 x i32> %v, i32 0
%e1 = extractelement <4 x i32> %v, i32 1
@@ -969,8 +969,8 @@ define float @reduce_fadd_4xi32_non_associative2(ptr %p) {
; CHECK-NEXT: vslidedown.vi v8, v8, 3
; CHECK-NEXT: vfmv.f.s fa2, v8
; CHECK-NEXT: fadd.s fa5, fa5, fa4
-; CHECK-NEXT: fadd.s fa4, fa3, fa2
-; CHECK-NEXT: fadd.s fa0, fa5, fa4
+; CHECK-NEXT: fadd.s fa5, fa5, fa3
+; CHECK-NEXT: fadd.s fa0, fa5, fa2
; CHECK-NEXT: ret
%v = load <4 x float>, ptr %p, align 256
%e0 = extractelement <4 x float> %v, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store.ll
index 7f18ee44631a1..24892abf3b3e3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store.ll
@@ -93,12 +93,12 @@ define void @store_v6i1(ptr %p, <6 x i1> %v) {
; CHECK-NEXT: andi a3, a2, 2
; CHECK-NEXT: or a1, a1, a3
; CHECK-NEXT: andi a3, a2, 4
-; CHECK-NEXT: andi a4, a2, 8
-; CHECK-NEXT: or a3, a3, a4
+; CHECK-NEXT: or a1, a1, a3
+; CHECK-NEXT: andi a3, a2, 8
; CHECK-NEXT: or a1, a1, a3
; CHECK-NEXT: andi a3, a2, 16
+; CHECK-NEXT: or a1, a1, a3
; CHECK-NEXT: andi a2, a2, -32
-; CHECK-NEXT: or a2, a3, a2
; CHECK-NEXT: or a1, a1, a2
; CHECK-NEXT: andi a1, a1, 63
; CHECK-NEXT: sb a1, 0(a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
index cdf0d35843620..5a44ae79b4e47 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
@@ -541,8 +541,8 @@ define void @reverse_strided_runtime_4xv2f32(ptr %x, ptr %z, i64 %s) {
; CHECK-LABEL: reverse_strided_runtime_4xv2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: add a0, a0, a2
-; CHECK-NEXT: add a3, a2, a2
-; CHECK-NEXT: add a0, a0, a3
+; CHECK-NEXT: add a0, a0, a2
+; CHECK-NEXT: add a0, a0, a2
; CHECK-NEXT: neg a2, a2
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT: vlse64.v v8, (a0), a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll
index e57b6a22dd6ea..4c512ffc39cfa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll
@@ -799,35 +799,35 @@ define void @strided_load_startval_add_with_splat(ptr noalias nocapture %arg, pt
; CHECK-NEXT: addi a5, a5, 1
; CHECK-NEXT: andi a6, a5, -32
; CHECK-NEXT: add a4, a6, a2
-; CHECK-NEXT: slli t0, a2, 2
-; CHECK-NEXT: add a7, a0, a2
-; CHECK-NEXT: add a2, a1, a2
-; CHECK-NEXT: add a2, a2, t0
+; CHECK-NEXT: slli a7, a2, 2
+; CHECK-NEXT: add a7, a7, a2
+; CHECK-NEXT: add a2, a0, a2
+; CHECK-NEXT: add a7, a1, a7
; CHECK-NEXT: add t0, a4, a0
; CHECK-NEXT: li t2, 32
; CHECK-NEXT: li t1, 5
; CHECK-NEXT: vsetvli zero, t2, e8, m1, ta, ma
; CHECK-NEXT: .LBB14_3: # %bb15
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vlse8.v v8, (a2), t1
-; CHECK-NEXT: vle8.v v9, (a7)
+; CHECK-NEXT: vlse8.v v8, (a7), t1
+; CHECK-NEXT: vle8.v v9, (a2)
; CHECK-NEXT: vadd.vv v8, v9, v8
-; CHECK-NEXT: vse8.v v8, (a7)
-; CHECK-NEXT: addi a7, a7, 32
-; CHECK-NEXT: addi a2, a2, 160
-; CHECK-NEXT: bne a7, t0, .LBB14_3
+; CHECK-NEXT: vse8.v v8, (a2)
+; CHECK-NEXT: addi a2, a2, 32
+; CHECK-NEXT: addi a7, a7, 160
+; CHECK-NEXT: bne a2, t0, .LBB14_3
; CHECK-NEXT: # %bb.4: # %bb30
; CHECK-NEXT: beq a5, a6, .LBB14_7
; CHECK-NEXT: .LBB14_5: # %bb32
; CHECK-NEXT: add a2, a0, a4
; CHECK-NEXT: slli a5, a4, 2
-; CHECK-NEXT: add a1, a1, a4
+; CHECK-NEXT: add a5, a5, a4
; CHECK-NEXT: add a1, a1, a5
; CHECK-NEXT: subw a3, a3, a4
; CHECK-NEXT: slli a3, a3, 32
; CHECK-NEXT: srli a3, a3, 32
-; CHECK-NEXT: add a0, a4, a0
-; CHECK-NEXT: add a0, a0, a3
+; CHECK-NEXT: add a3, a4, a3
+; CHECK-NEXT: add a0, a3, a0
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: .LBB14_6: # %bb35
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll b/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll
index 79bd60d1702f3..37f92508a14fd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll
@@ -31,7 +31,7 @@ define <vscale x 1 x i8> @simple_vadd_vv(<vscale x 1 x i8> %0, <vscale x 1 x i8>
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vadd.vv v9, v8, v9
-; CHECK-NEXT: vadd.vv v8, v8, v8
+; CHECK-NEXT: vadd.vv v9, v8, v9
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
@@ -61,7 +61,7 @@ define <vscale x 1 x i8> @simple_vadd_vsub_vv(<vscale x 1 x i8> %0, <vscale x 1
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vsub.vv v9, v8, v9
-; CHECK-NEXT: vadd.vv v8, v8, v8
+; CHECK-NEXT: vadd.vv v9, v8, v9
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
@@ -91,7 +91,7 @@ define <vscale x 1 x i8> @simple_vmul_vv(<vscale x 1 x i8> %0, <vscale x 1 x i8>
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmul.vv v9, v8, v9
-; CHECK-NEXT: vmul.vv v8, v8, v8
+; CHECK-NEXT: vmul.vv v9, v8, v9
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
@@ -124,8 +124,8 @@ define <vscale x 1 x i8> @vadd_vv_passthru(<vscale x 1 x i8> %0, <vscale x 1 x i
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vadd.vv v10, v8, v9
; CHECK-NEXT: vmv1r.v v9, v8
-; CHECK-NEXT: vadd.vv v9, v8, v8
-; CHECK-NEXT: vadd.vv v8, v9, v10
+; CHECK-NEXT: vadd.vv v9, v8, v10
+; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
@@ -187,8 +187,8 @@ define <vscale x 1 x i8> @vadd_vv_mask(<vscale x 1 x i8> %0, <vscale x 1 x i8> %
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vadd.vv v10, v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v9, v8
-; CHECK-NEXT: vadd.vv v9, v8, v8, v0.t
-; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vadd.vv v9, v8, v10, v0.t
+; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
diff --git a/llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll b/llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll
index 5fa802b7f27ca..1b78653e03f78 100644
--- a/llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll
+++ b/llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll
@@ -424,8 +424,8 @@ define iXLen2 @test_udiv_65535(iXLen2 %x) nounwind {
; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: sub a1, a4, a0
; RV32-NEXT: slli a0, a3, 16
-; RV32-NEXT: neg a2, a3
-; RV32-NEXT: sub a0, a2, a0
+; RV32-NEXT: add a0, a0, a3
+; RV32-NEXT: neg a0, a0
; RV32-NEXT: ret
;
; RV64-LABEL: test_udiv_65535:
diff --git a/llvm/test/CodeGen/RISCV/srem-lkk.ll b/llvm/test/CodeGen/RISCV/srem-lkk.ll
index 7c291bbceedc6..ea71e6a5d5a5a 100644
--- a/llvm/test/CodeGen/RISCV/srem-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/srem-lkk.ll
@@ -240,8 +240,8 @@ define i32 @combine_srem_sdiv(i32 %x) nounwind {
; RV32IM-NEXT: add a1, a1, a2
; RV32IM-NEXT: li a2, 95
; RV32IM-NEXT: mul a2, a1, a2
-; RV32IM-NEXT: add a0, a0, a1
; RV32IM-NEXT: sub a0, a0, a2
+; RV32IM-NEXT: add a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: combine_srem_sdiv:
@@ -278,8 +278,8 @@ define i32 @combine_srem_sdiv(i32 %x) nounwind {
; RV64IM-NEXT: add a1, a1, a2
; RV64IM-NEXT: li a2, 95
; RV64IM-NEXT: mul a2, a1, a2
-; RV64IM-NEXT: add a0, a0, a1
; RV64IM-NEXT: subw a0, a0, a2
+; RV64IM-NEXT: addw a0, a0, a1
; RV64IM-NEXT: ret
%1 = srem i32 %x, 95
%2 = sdiv i32 %x, 95
diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
index 457d0380ca8a8..22be6a4f1ef9a 100644
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -369,9 +369,9 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV32-NEXT: srli a2, a1, 31
; RV32-NEXT: andi a1, a1, 1
; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: or a1, a2, a1
; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: or a0, a2, a0
-; RV32-NEXT: or a0, a0, a1
+; RV32-NEXT: or a0, a1, a0
; RV32-NEXT: sw a0, 8(s0)
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
@@ -521,9 +521,9 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV32M-NEXT: srli a2, a1, 31
; RV32M-NEXT: andi a1, a1, 1
; RV32M-NEXT: slli a1, a1, 1
+; RV32M-NEXT: or a1, a2, a1
; RV32M-NEXT: slli a0, a0, 2
-; RV32M-NEXT: or a0, a2, a0
-; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: or a0, a1, a0
; RV32M-NEXT: sw a0, 8(s0)
; RV32M-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32M-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
@@ -546,29 +546,29 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV64M-NEXT: or a3, a5, a3
; RV64M-NEXT: srai a3, a3, 31
; RV64M-NEXT: slli a4, a4, 32
+; RV64M-NEXT: lui a5, %hi(.LCPI3_0)
+; RV64M-NEXT: ld a5, %lo(.LCPI3_0)(a5)
; RV64M-NEXT: or a2, a2, a4
; RV64M-NEXT: slli a2, a2, 29
-; RV64M-NEXT: lui a4, %hi(.LCPI3_0)
-; RV64M-NEXT: ld a4, %lo(.LCPI3_0)(a4)
; RV64M-NEXT: srai a2, a2, 31
-; RV64M-NEXT: slli a1, a1, 31
-; RV64M-NEXT: srai a1, a1, 31
-; RV64M-NEXT: mulh a4, a2, a4
+; RV64M-NEXT: mulh a4, a2, a5
; RV64M-NEXT: srli a5, a4, 63
; RV64M-NEXT: srai a4, a4, 1
; RV64M-NEXT: add a4, a4, a5
+; RV64M-NEXT: slli a5, a4, 2
+; RV64M-NEXT: add a4, a5, a4
; RV64M-NEXT: lui a5, %hi(.LCPI3_1)
; RV64M-NEXT: ld a5, %lo(.LCPI3_1)(a5)
-; RV64M-NEXT: add a2, a2, a4
-; RV64M-NEXT: slli a4, a4, 2
+; RV64M-NEXT: slli a1, a1, 31
+; RV64M-NEXT: srai a1, a1, 31
; RV64M-NEXT: add a2, a2, a4
; RV64M-NEXT: mulh a4, a3, a5
; RV64M-NEXT: srli a5, a4, 63
; RV64M-NEXT: srai a4, a4, 1
; RV64M-NEXT: add a4, a4, a5
; RV64M-NEXT: slli a5, a4, 3
+; RV64M-NEXT: sub a4, a4, a5
; RV64M-NEXT: add a3, a3, a4
-; RV64M-NEXT: sub a3, a3, a5
; RV64M-NEXT: addi a3, a3, -1
; RV64M-NEXT: seqz a3, a3
; RV64M-NEXT: lui a4, 699051
@@ -711,9 +711,9 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV32MV-NEXT: vmv.x.s a2, v8
; RV32MV-NEXT: andi a2, a2, 1
; RV32MV-NEXT: slli a2, a2, 1
+; RV32MV-NEXT: or a1, a1, a2
; RV32MV-NEXT: slli a0, a0, 2
; RV32MV-NEXT: or a0, a1, a0
-; RV32MV-NEXT: or a0, a0, a2
; RV32MV-NEXT: sw a0, 8(s0)
; RV32MV-NEXT: csrr a0, vlenb
; RV32MV-NEXT: slli a0, a0, 1
@@ -737,22 +737,22 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV64MV-NEXT: slli a1, a1, 29
; RV64MV-NEXT: srai a1, a1, 31
; RV64MV-NEXT: srli a4, a3, 2
+; RV64MV-NEXT: lui a5, %hi(.LCPI3_0)
+; RV64MV-NEXT: ld a5, %lo(.LCPI3_0)(a5)
; RV64MV-NEXT: slli a2, a2, 62
; RV64MV-NEXT: or a2, a2, a4
-; RV64MV-NEXT: lui a4, %hi(.LCPI3_0)
-; RV64MV-NEXT: ld a4, %lo(.LCPI3_0)(a4)
; RV64MV-NEXT: srai a2, a2, 31
-; RV64MV-NEXT: slli a3, a3, 31
-; RV64MV-NEXT: srai a3, a3, 31
-; RV64MV-NEXT: mulh a4, a2, a4
+; RV64MV-NEXT: mulh a4, a2, a5
; RV64MV-NEXT: srli a5, a4, 63
; RV64MV-NEXT: srai a4, a4, 1
; RV64MV-NEXT: add a4, a4, a5
+; RV64MV-NEXT: slli a5, a4, 3
+; RV64MV-NEXT: sub a4, a4, a5
; RV64MV-NEXT: lui a5, %hi(.LCPI3_1)
; RV64MV-NEXT: ld a5, %lo(.LCPI3_1)(a5)
+; RV64MV-NEXT: slli a3, a3, 31
+; RV64MV-NEXT: srai a3, a3, 31
; RV64MV-NEXT: add a2, a2, a4
-; RV64MV-NEXT: slli a4, a4, 3
-; RV64MV-NEXT: sub a2, a2, a4
; RV64MV-NEXT: mulh a4, a3, a5
; RV64MV-NEXT: srli a5, a4, 63
; RV64MV-NEXT: add a4, a4, a5
@@ -770,8 +770,8 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV64MV-NEXT: srai a2, a2, 1
; RV64MV-NEXT: add a2, a2, a3
; RV64MV-NEXT: slli a3, a2, 2
+; RV64MV-NEXT: add a2, a3, a2
; RV64MV-NEXT: add a1, a1, a2
-; RV64MV-NEXT: add a1, a1, a3
; RV64MV-NEXT: vslide1down.vx v8, v8, a1
; RV64MV-NEXT: vslidedown.vi v8, v8, 1
; RV64MV-NEXT: li a1, -1
diff --git a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
index 7fc4713ac2d6e..629db2e29c668 100644
--- a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
@@ -458,32 +458,32 @@ define <4 x i16> @combine_srem_sdiv(<4 x i16> %x) nounwind {
; RV32IM-NEXT: add a6, a6, a7
; RV32IM-NEXT: li a7, 95
; RV32IM-NEXT: mul t0, a6, a7
-; RV32IM-NEXT: mulh t1, a1, a5
-; RV32IM-NEXT: add t1, t1, a1
+; RV32IM-NEXT: sub a4, a4, t0
+; RV32IM-NEXT: mulh t0, a1, a5
+; RV32IM-NEXT: add t0, t0, a1
+; RV32IM-NEXT: srli t1, t0, 31
+; RV32IM-NEXT: srai t0, t0, 6
+; RV32IM-NEXT: add t0, t0, t1
+; RV32IM-NEXT: mul t1, t0, a7
+; RV32IM-NEXT: sub a1, a1, t1
+; RV32IM-NEXT: mulh t1, a3, a5
+; RV32IM-NEXT: add t1, t1, a3
; RV32IM-NEXT: srli t2, t1, 31
; RV32IM-NEXT: srai t1, t1, 6
; RV32IM-NEXT: add t1, t1, t2
; RV32IM-NEXT: mul t2, t1, a7
-; RV32IM-NEXT: mulh t3, a3, a5
-; RV32IM-NEXT: add t3, t3, a3
-; RV32IM-NEXT: srli t4, t3, 31
-; RV32IM-NEXT: srai t3, t3, 6
-; RV32IM-NEXT: add t3, t3, t4
-; RV32IM-NEXT: mul t4, t3, a7
+; RV32IM-NEXT: sub a3, a3, t2
; RV32IM-NEXT: mulh a5, a2, a5
; RV32IM-NEXT: add a5, a5, a2
-; RV32IM-NEXT: srli t5, a5, 31
+; RV32IM-NEXT: srli t2, a5, 31
; RV32IM-NEXT: srai a5, a5, 6
-; RV32IM-NEXT: add a5, a5, t5
+; RV32IM-NEXT: add a5, a5, t2
; RV32IM-NEXT: mul a7, a5, a7
-; RV32IM-NEXT: add a2, a2, a5
; RV32IM-NEXT: sub a2, a2, a7
-; RV32IM-NEXT: add a3, a3, t3
-; RV32IM-NEXT: sub a3, a3, t4
-; RV32IM-NEXT: add a1, a1, t1
-; RV32IM-NEXT: sub a1, a1, t2
+; RV32IM-NEXT: add a2, a2, a5
+; RV32IM-NEXT: add a3, a3, t1
+; RV32IM-NEXT: add a1, a1, t0
; RV32IM-NEXT: add a4, a4, a6
-; RV32IM-NEXT: sub a4, a4, t0
; RV32IM-NEXT: sh a4, 6(a0)
; RV32IM-NEXT: sh a1, 4(a0)
; RV32IM-NEXT: sh a3, 2(a0)
@@ -575,35 +575,35 @@ define <4 x i16> @combine_srem_sdiv(<4 x i16> %x) nounwind {
; RV64IM-NEXT: add a6, a6, a7
; RV64IM-NEXT: li a7, 95
; RV64IM-NEXT: mul t0, a6, a7
-; RV64IM-NEXT: mulh t1, a1, a3
-; RV64IM-NEXT: add t1, t1, a1
+; RV64IM-NEXT: subw a2, a2, t0
+; RV64IM-NEXT: mulh t0, a1, a3
+; RV64IM-NEXT: add t0, t0, a1
+; RV64IM-NEXT: srli t1, t0, 63
+; RV64IM-NEXT: srai t0, t0, 6
+; RV64IM-NEXT: add t0, t0, t1
+; RV64IM-NEXT: mul t1, t0, a7
+; RV64IM-NEXT: subw a1, a1, t1
+; RV64IM-NEXT: mulh t1, a5, a3
+; RV64IM-NEXT: add t1, t1, a5
; RV64IM-NEXT: srli t2, t1, 63
; RV64IM-NEXT: srai t1, t1, 6
; RV64IM-NEXT: add t1, t1, t2
; RV64IM-NEXT: mul t2, t1, a7
-; RV64IM-NEXT: mulh t3, a5, a3
-; RV64IM-NEXT: add t3, t3, a5
-; RV64IM-NEXT: srli t4, t3, 63
-; RV64IM-NEXT: srai t3, t3, 6
-; RV64IM-NEXT: add t3, t3, t4
-; RV64IM-NEXT: mul t4, t3, a7
+; RV64IM-NEXT: subw a5, a5, t2
; RV64IM-NEXT: mulh a3, a4, a3
; RV64IM-NEXT: add a3, a3, a4
-; RV64IM-NEXT: srli t5, a3, 63
+; RV64IM-NEXT: srli t2, a3, 63
; RV64IM-NEXT: srai a3, a3, 6
-; RV64IM-NEXT: add a3, a3, t5
+; RV64IM-NEXT: add a3, a3, t2
; RV64IM-NEXT: mul a7, a3, a7
+; RV64IM-NEXT: subw a4, a4, a7
; RV64IM-NEXT: add a3, a4, a3
-; RV64IM-NEXT: subw a3, a3, a7
-; RV64IM-NEXT: add a5, a5, t3
-; RV64IM-NEXT: subw a4, a5, t4
-; RV64IM-NEXT: add a1, a1, t1
-; RV64IM-NEXT: subw a1, a1, t2
+; RV64IM-NEXT: add a5, a5, t1
+; RV64IM-NEXT: add a1, a1, t0
; RV64IM-NEXT: add a2, a2, a6
-; RV64IM-NEXT: subw a2, a2, t0
; RV64IM-NEXT: sh a2, 6(a0)
; RV64IM-NEXT: sh a1, 4(a0)
-; RV64IM-NEXT: sh a4, 2(a0)
+; RV64IM-NEXT: sh a5, 2(a0)
; RV64IM-NEXT: sh a3, 0(a0)
; RV64IM-NEXT: ret
%1 = srem <4 x i16> %x, <i16 95, i16 95, i16 95, i16 95>
diff --git a/llvm/test/CodeGen/RISCV/urem-lkk.ll b/llvm/test/CodeGen/RISCV/urem-lkk.ll
index f83a933c0b5c8..92aa7a5c58db5 100644
--- a/llvm/test/CodeGen/RISCV/urem-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/urem-lkk.ll
@@ -140,8 +140,8 @@ define i32 @combine_urem_udiv(i32 %x) nounwind {
; RV32IM-NEXT: srli a1, a1, 6
; RV32IM-NEXT: li a2, 95
; RV32IM-NEXT: mul a2, a1, a2
-; RV32IM-NEXT: add a0, a0, a1
; RV32IM-NEXT: sub a0, a0, a2
+; RV32IM-NEXT: add a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: combine_urem_udiv:
@@ -180,8 +180,8 @@ define i32 @combine_urem_udiv(i32 %x) nounwind {
; RV64IM-NEXT: srli a1, a1, 6
; RV64IM-NEXT: li a2, 95
; RV64IM-NEXT: mul a2, a1, a2
-; RV64IM-NEXT: add a0, a0, a1
; RV64IM-NEXT: subw a0, a0, a2
+; RV64IM-NEXT: addw a0, a0, a1
; RV64IM-NEXT: ret
%1 = urem i32 %x, 95
%2 = udiv i32 %x, 95
diff --git a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
index c016e8f316363..39125cb53edaa 100644
--- a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
@@ -192,8 +192,8 @@ define i1 @test_urem_odd_setne(i4 %X) nounwind {
; RV32-LABEL: test_urem_odd_setne:
; RV32: # %bb.0:
; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
; RV32-NEXT: neg a0, a0
-; RV32-NEXT: sub a0, a0, a1
; RV32-NEXT: andi a0, a0, 15
; RV32-NEXT: sltiu a0, a0, 4
; RV32-NEXT: xori a0, a0, 1
@@ -202,8 +202,8 @@ define i1 @test_urem_odd_setne(i4 %X) nounwind {
; RV64-LABEL: test_urem_odd_setne:
; RV64: # %bb.0:
; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
; RV64-NEXT: negw a0, a0
-; RV64-NEXT: subw a0, a0, a1
; RV64-NEXT: andi a0, a0, 15
; RV64-NEXT: sltiu a0, a0, 4
; RV64-NEXT: xori a0, a0, 1
@@ -212,8 +212,8 @@ define i1 @test_urem_odd_setne(i4 %X) nounwind {
; RV32M-LABEL: test_urem_odd_setne:
; RV32M: # %bb.0:
; RV32M-NEXT: slli a1, a0, 1
+; RV32M-NEXT: add a0, a1, a0
; RV32M-NEXT: neg a0, a0
-; RV32M-NEXT: sub a0, a0, a1
; RV32M-NEXT: andi a0, a0, 15
; RV32M-NEXT: sltiu a0, a0, 4
; RV32M-NEXT: xori a0, a0, 1
@@ -222,8 +222,8 @@ define i1 @test_urem_odd_setne(i4 %X) nounwind {
; RV64M-LABEL: test_urem_odd_setne:
; RV64M: # %bb.0:
; RV64M-NEXT: slli a1, a0, 1
+; RV64M-NEXT: add a0, a1, a0
; RV64M-NEXT: negw a0, a0
-; RV64M-NEXT: subw a0, a0, a1
; RV64M-NEXT: andi a0, a0, 15
; RV64M-NEXT: sltiu a0, a0, 4
; RV64M-NEXT: xori a0, a0, 1
@@ -232,8 +232,8 @@ define i1 @test_urem_odd_setne(i4 %X) nounwind {
; RV32MV-LABEL: test_urem_odd_setne:
; RV32MV: # %bb.0:
; RV32MV-NEXT: slli a1, a0, 1
+; RV32MV-NEXT: add a0, a1, a0
; RV32MV-NEXT: neg a0, a0
-; RV32MV-NEXT: sub a0, a0, a1
; RV32MV-NEXT: andi a0, a0, 15
; RV32MV-NEXT: sltiu a0, a0, 4
; RV32MV-NEXT: xori a0, a0, 1
@@ -242,8 +242,8 @@ define i1 @test_urem_odd_setne(i4 %X) nounwind {
; RV64MV-LABEL: test_urem_odd_setne:
; RV64MV: # %bb.0:
; RV64MV-NEXT: slli a1, a0, 1
+; RV64MV-NEXT: add a0, a1, a0
; RV64MV-NEXT: negw a0, a0
-; RV64MV-NEXT: subw a0, a0, a1
; RV64MV-NEXT: andi a0, a0, 15
; RV64MV-NEXT: sltiu a0, a0, 4
; RV64MV-NEXT: xori a0, a0, 1
@@ -366,9 +366,9 @@ define void @test_urem_vec(ptr %X) nounwind {
; RV32-NEXT: andi a1, s3, 2047
; RV32-NEXT: andi a0, a0, 2047
; RV32-NEXT: slli a0, a0, 11
+; RV32-NEXT: or a0, a1, a0
; RV32-NEXT: slli s1, s1, 22
; RV32-NEXT: or a0, a0, s1
-; RV32-NEXT: or a0, a1, a0
; RV32-NEXT: sw a0, 0(s0)
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
@@ -420,8 +420,8 @@ define void @test_urem_vec(ptr %X) nounwind {
; RV64-NEXT: andi a1, s3, 2047
; RV64-NEXT: andi a2, s2, 2047
; RV64-NEXT: slli a2, a2, 11
+; RV64-NEXT: or a1, a1, a2
; RV64-NEXT: slli a0, a0, 22
-; RV64-NEXT: or a0, a2, a0
; RV64-NEXT: or a0, a1, a0
; RV64-NEXT: sw a0, 0(s0)
; RV64-NEXT: slli a0, a0, 31
@@ -471,8 +471,8 @@ define void @test_urem_vec(ptr %X) nounwind {
; RV32M-NEXT: andi a2, a2, 2047
; RV32M-NEXT: andi a3, a3, 2047
; RV32M-NEXT: slli a3, a3, 11
+; RV32M-NEXT: or a2, a2, a3
; RV32M-NEXT: slli a1, a1, 22
-; RV32M-NEXT: or a1, a3, a1
; RV32M-NEXT: or a1, a2, a1
; RV32M-NEXT: sw a1, 0(a0)
; RV32M-NEXT: ret
@@ -510,8 +510,8 @@ define void @test_urem_vec(ptr %X) nounwind {
; RV64M-NEXT: andi a1, a1, 2047
; RV64M-NEXT: andi a3, a3, 2047
; RV64M-NEXT: slli a3, a3, 11
+; RV64M-NEXT: or a1, a1, a3
; RV64M-NEXT: slli a2, a2, 22
-; RV64M-NEXT: or a2, a3, a2
; RV64M-NEXT: or a1, a1, a2
; RV64M-NEXT: sw a1, 0(a0)
; RV64M-NEXT: slli a1, a1, 31
@@ -575,9 +575,9 @@ define void @test_urem_vec(ptr %X) nounwind {
; RV32MV-NEXT: vmv.x.s a3, v8
; RV32MV-NEXT: andi a3, a3, 2047
; RV32MV-NEXT: slli a3, a3, 11
+; RV32MV-NEXT: or a2, a2, a3
; RV32MV-NEXT: slli a1, a1, 22
; RV32MV-NEXT: or a1, a2, a1
-; RV32MV-NEXT: or a1, a1, a3
; RV32MV-NEXT: sw a1, 0(a0)
; RV32MV-NEXT: ret
;
@@ -631,10 +631,10 @@ define void @test_urem_vec(ptr %X) nounwind {
; RV64MV-NEXT: vmv.x.s a2, v9
; RV64MV-NEXT: andi a2, a2, 2047
; RV64MV-NEXT: slli a2, a2, 11
+; RV64MV-NEXT: or a1, a1, a2
; RV64MV-NEXT: vslidedown.vi v8, v8, 2
-; RV64MV-NEXT: vmv.x.s a3, v8
-; RV64MV-NEXT: slli a3, a3, 22
-; RV64MV-NEXT: or a1, a1, a3
+; RV64MV-NEXT: vmv.x.s a2, v8
+; RV64MV-NEXT: slli a2, a2, 22
; RV64MV-NEXT: or a1, a1, a2
; RV64MV-NEXT: sw a1, 0(a0)
; RV64MV-NEXT: slli a1, a1, 31
diff --git a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
index c057c656e0fb7..346ae21c85dda 100644
--- a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
@@ -397,20 +397,20 @@ define <4 x i16> @combine_urem_udiv(<4 x i16> %x) nounwind {
; RV32IM-NEXT: mulhu a6, a4, a5
; RV32IM-NEXT: li a7, 95
; RV32IM-NEXT: mul t0, a6, a7
-; RV32IM-NEXT: mulhu t1, a1, a5
+; RV32IM-NEXT: sub a4, a4, t0
+; RV32IM-NEXT: mulhu t0, a1, a5
+; RV32IM-NEXT: mul t1, t0, a7
+; RV32IM-NEXT: sub a1, a1, t1
+; RV32IM-NEXT: mulhu t1, a3, a5
; RV32IM-NEXT: mul t2, t1, a7
-; RV32IM-NEXT: mulhu t3, a3, a5
-; RV32IM-NEXT: mul t4, t3, a7
+; RV32IM-NEXT: sub a3, a3, t2
; RV32IM-NEXT: mulhu a5, a2, a5
; RV32IM-NEXT: mul a7, a5, a7
-; RV32IM-NEXT: add a2, a2, a5
; RV32IM-NEXT: sub a2, a2, a7
-; RV32IM-NEXT: add a3, a3, t3
-; RV32IM-NEXT: sub a3, a3, t4
-; RV32IM-NEXT: add a1, a1, t1
-; RV32IM-NEXT: sub a1, a1, t2
+; RV32IM-NEXT: add a2, a2, a5
+; RV32IM-NEXT: add a3, a3, t1
+; RV32IM-NEXT: add a1, a1, t0
; RV32IM-NEXT: add a4, a4, a6
-; RV32IM-NEXT: sub a4, a4, t0
; RV32IM-NEXT: sh a4, 6(a0)
; RV32IM-NEXT: sh a1, 4(a0)
; RV32IM-NEXT: sh a3, 2(a0)
@@ -498,23 +498,23 @@ define <4 x i16> @combine_urem_udiv(<4 x i16> %x) nounwind {
; RV64IM-NEXT: mulhu a6, a2, a3
; RV64IM-NEXT: li a7, 95
; RV64IM-NEXT: mul t0, a6, a7
-; RV64IM-NEXT: mulhu t1, a1, a3
+; RV64IM-NEXT: subw a2, a2, t0
+; RV64IM-NEXT: mulhu t0, a1, a3
+; RV64IM-NEXT: mul t1, t0, a7
+; RV64IM-NEXT: subw a1, a1, t1
+; RV64IM-NEXT: mulhu t1, a5, a3
; RV64IM-NEXT: mul t2, t1, a7
-; RV64IM-NEXT: mulhu t3, a5, a3
-; RV64IM-NEXT: mul t4, t3, a7
+; RV64IM-NEXT: subw a5, a5, t2
; RV64IM-NEXT: mulhu a3, a4, a3
; RV64IM-NEXT: mul a7, a3, a7
+; RV64IM-NEXT: subw a4, a4, a7
; RV64IM-NEXT: add a3, a4, a3
-; RV64IM-NEXT: subw a3, a3, a7
-; RV64IM-NEXT: add a5, a5, t3
-; RV64IM-NEXT: subw a4, a5, t4
-; RV64IM-NEXT: add a1, a1, t1
-; RV64IM-NEXT: subw a1, a1, t2
+; RV64IM-NEXT: add a5, a5, t1
+; RV64IM-NEXT: add a1, a1, t0
; RV64IM-NEXT: add a2, a2, a6
-; RV64IM-NEXT: subw a2, a2, t0
; RV64IM-NEXT: sh a2, 6(a0)
; RV64IM-NEXT: sh a1, 4(a0)
-; RV64IM-NEXT: sh a4, 2(a0)
+; RV64IM-NEXT: sh a5, 2(a0)
; RV64IM-NEXT: sh a3, 0(a0)
; RV64IM-NEXT: ret
%1 = urem <4 x i16> %x, <i16 95, i16 95, i16 95, i16 95>
diff --git a/llvm/test/CodeGen/RISCV/xaluo.ll b/llvm/test/CodeGen/RISCV/xaluo.ll
index b1efe53290e8e..de046ffb1ce09 100644
--- a/llvm/test/CodeGen/RISCV/xaluo.ll
+++ b/llvm/test/CodeGen/RISCV/xaluo.ll
@@ -5155,8 +5155,8 @@ define zeroext i1 @smulo2.br.i64(i64 %v1) {
; RV32-NEXT: add a4, a4, t1
; RV32-NEXT: sltu a5, t3, a5
; RV32-NEXT: mulh a2, t2, a2
-; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: sub a0, t0, a0
+; RV32-NEXT: sub a0, a0, a1
; RV32-NEXT: add a0, a0, a2
; RV32-NEXT: add a0, a0, a5
; RV32-NEXT: add a0, a4, a0
@@ -5217,8 +5217,8 @@ define zeroext i1 @smulo2.br.i64(i64 %v1) {
; RV32ZBA-NEXT: add a4, a4, t1
; RV32ZBA-NEXT: sltu a5, t3, a5
; RV32ZBA-NEXT: mulh a2, t2, a2
-; RV32ZBA-NEXT: add a0, a0, a1
; RV32ZBA-NEXT: sub a0, t0, a0
+; RV32ZBA-NEXT: sub a0, a0, a1
; RV32ZBA-NEXT: add a0, a0, a2
; RV32ZBA-NEXT: add a0, a0, a5
; RV32ZBA-NEXT: add a0, a4, a0
@@ -5279,8 +5279,8 @@ define zeroext i1 @smulo2.br.i64(i64 %v1) {
; RV32ZICOND-NEXT: add a4, a4, t1
; RV32ZICOND-NEXT: sltu a5, t3, a5
; RV32ZICOND-NEXT: mulh a2, t2, a2
-; RV32ZICOND-NEXT: add a0, a0, a1
; RV32ZICOND-NEXT: sub a0, t0, a0
+; RV32ZICOND-NEXT: sub a0, a0, a1
; RV32ZICOND-NEXT: add a0, a0, a2
; RV32ZICOND-NEXT: add a0, a0, a5
; RV32ZICOND-NEXT: add a0, a4, a0
diff --git a/llvm/test/CodeGen/RISCV/xtheadmac.ll b/llvm/test/CodeGen/RISCV/xtheadmac.ll
index 992c88e3e6268..3d48e7675be70 100644
--- a/llvm/test/CodeGen/RISCV/xtheadmac.ll
+++ b/llvm/test/CodeGen/RISCV/xtheadmac.ll
@@ -43,8 +43,8 @@ define i64 @mula_i64(i64 %a, i64 %b, i64 %c) {
; RV32XTHEADMAC-NEXT: mv a3, a0
; RV32XTHEADMAC-NEXT: th.mula a3, a2, a4
; RV32XTHEADMAC-NEXT: sltu a0, a3, a0
-; RV32XTHEADMAC-NEXT: add a0, a1, a0
-; RV32XTHEADMAC-NEXT: add a1, a0, a6
+; RV32XTHEADMAC-NEXT: add a1, a1, a6
+; RV32XTHEADMAC-NEXT: add a1, a1, a0
; RV32XTHEADMAC-NEXT: mv a0, a3
; RV32XTHEADMAC-NEXT: ret
;
@@ -102,8 +102,8 @@ define i64 @muls_i64(i64 %a, i64 %b, i64 %c) {
; RV32XTHEADMAC-NEXT: mul a3, a2, a4
; RV32XTHEADMAC-NEXT: sltu a3, a0, a3
; RV32XTHEADMAC-NEXT: th.muls a0, a2, a4
-; RV32XTHEADMAC-NEXT: sub a1, a1, a3
; RV32XTHEADMAC-NEXT: sub a1, a1, a6
+; RV32XTHEADMAC-NEXT: sub a1, a1, a3
; RV32XTHEADMAC-NEXT: ret
;
; RV64XTHEADMAC-LABEL: muls_i64:
diff --git a/llvm/test/CodeGen/RISCV/xtheadmemidx.ll b/llvm/test/CodeGen/RISCV/xtheadmemidx.ll
index 46aa383866e93..09b8d7fce5855 100644
--- a/llvm/test/CodeGen/RISCV/xtheadmemidx.ll
+++ b/llvm/test/CodeGen/RISCV/xtheadmemidx.ll
@@ -324,8 +324,8 @@ define ptr @ldib(ptr %base, i64 %a) {
; RV32XTHEADMEMIDX-NEXT: lw a4, 4(a0)
; RV32XTHEADMEMIDX-NEXT: add a1, a3, a1
; RV32XTHEADMEMIDX-NEXT: sltu a3, a1, a3
-; RV32XTHEADMEMIDX-NEXT: add a2, a2, a3
; RV32XTHEADMEMIDX-NEXT: add a2, a4, a2
+; RV32XTHEADMEMIDX-NEXT: add a2, a2, a3
; RV32XTHEADMEMIDX-NEXT: sw a1, 8(a0)
; RV32XTHEADMEMIDX-NEXT: sw a2, 12(a0)
; RV32XTHEADMEMIDX-NEXT: ret