[llvm] f8c6812 - [RISCV] Enable the Machine Late Cleanup pass.
Craig Topper via llvm-commits
llvm-commits@lists.llvm.org
Sun Dec 11 21:07:07 PST 2022
Author: Craig Topper
Date: 2022-12-11T20:55:05-08:00
New Revision: f8c681227fa740ca9e34052fc9c8f3d42d488dc7
URL: https://github.com/llvm/llvm-project/commit/f8c681227fa740ca9e34052fc9c8f3d42d488dc7
DIFF: https://github.com/llvm/llvm-project/commit/f8c681227fa740ca9e34052fc9c8f3d42d488dc7.diff
LOG: [RISCV] Enable the Machine Late Cleanup pass.
I believe the bug has been fixed by D139169.
Reviewed By: asb
Differential Revision: https://reviews.llvm.org/D139753
Added:
Modified:
llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
llvm/test/CodeGen/RISCV/O3-pipeline.ll
llvm/test/CodeGen/RISCV/branch-relaxation.ll
llvm/test/CodeGen/RISCV/out-of-reach-emergency-slot.mir
llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll
llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
llvm/test/CodeGen/RISCV/rvv/round-vp.ll
llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector.ll
llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll
llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector.ll
llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll
llvm/test/CodeGen/RISCV/stack-realignment.ll
Removed:
################################################################################
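Every assembly change below is one pattern: the newly enabled pass deletes an instruction that recomputes a value its destination register already holds, typically a repeated stack-address materialization such as "addi a5, sp, 16" between a vector spill and its reload, or a repeated "lui a0, 2" in an epilogue. What follows is a minimal, self-contained C++ sketch of that idea on a toy instruction stream; it is illustrative only, not the pass itself (the real implementation lives in llvm/lib/CodeGen/MachineLateInstrsCleanup.cpp and operates on MachineInstrs with proper liveness bookkeeping).

#include <algorithm>
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Toy instruction: one optional def, a canonical text form used for
// equality, and the registers it reads. Real MachineInstrs carry far
// more state than this.
struct Inst {
  std::string Def;                // register written ("" for stores)
  std::string Text;               // canonical form
  std::vector<std::string> Uses;  // registers read
};

int main() {
  // Mirrors the bitreverse-sdnode.ll change below: the second
  // materialization of sp+16 into a5 is redundant because a5 still
  // holds that value at that point.
  std::vector<Inst> Block = {
      {"a5", "addi a5, sp, 16", {"sp"}},
      {"v0", "vl8r.v v0, (a5)", {"a5"}},
      {"v16", "vor.vv v16, v16, v0", {"v16", "v0"}},
      {"a5", "addi a5, sp, 16", {"sp"}},  // <- candidate for deletion
      {"", "vs8r.v v16, (a5)", {"v16", "a5"}},
  };

  std::map<std::string, Inst> LastDef;  // reg -> last kept definition
  std::vector<Inst> Cleaned;
  for (const Inst &I : Block) {
    if (!I.Def.empty()) {
      auto Hit = LastDef.find(I.Def);
      if (Hit != LastDef.end() && Hit->second.Text == I.Text) {
        std::cout << "deleting redundant: " << I.Text << "\n";
        continue;  // register already holds exactly this value
      }
      // I.Def receives a new value, so cached defs that read I.Def
      // are now stale and must be forgotten.
      for (auto It = LastDef.begin(); It != LastDef.end();) {
        const auto &U = It->second.Uses;
        if (std::find(U.begin(), U.end(), I.Def) != U.end())
          It = LastDef.erase(It);
        else
          ++It;
      }
      LastDef[I.Def] = I;
    }
    Cleaned.push_back(I);
  }
  std::cout << Cleaned.size() << " of " << Block.size()
            << " instructions kept\n";
  return 0;
}

Running this prints that the duplicated addi is deleted and 4 of 5 instructions survive, which is exactly the shape of every test update in this commit.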
diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index 43e8c7f551ea1..dff8ccccc3a60 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -286,10 +286,6 @@ void RISCVPassConfig::addPreRegAlloc() {
void RISCVPassConfig::addPostRegAlloc() {
if (TM->getOptLevel() != CodeGenOpt::None && EnableRedundantCopyElimination)
addPass(createRISCVRedundantCopyEliminationPass());
-
- // Temporarily disabled until post-RA pseudo expansion problem is fixed,
- // see D123394 and D139169.
- disablePass(&MachineLateInstrsCleanupID);
}
yaml::MachineFunctionInfo *
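With the disablePass call gone, RISCVPassConfig falls back to the default TargetPassConfig schedule, which runs the cleanup immediately after Prologue/Epilogue Insertion whenever the optimization level is above None; the O3-pipeline.ll update below checks exactly that position. If a target ever needs to opt back out, the hook is the one removed here. A hypothetical sketch for some other backend:

// Hypothetical opt-out for a backend "MyTarget", mirroring the deleted
// RISC-V lines: disablePass records a default machine pass, identified
// by its ID, so TargetPassConfig::addMachinePasses will skip it.
void MyTargetPassConfig::addPostRegAlloc() {
  disablePass(&MachineLateInstrsCleanupID);
}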
diff --git a/llvm/test/CodeGen/RISCV/O3-pipeline.ll b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
index 64ffdb75b0eb3..af3efb1599a9f 100644
--- a/llvm/test/CodeGen/RISCV/O3-pipeline.ll
+++ b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
@@ -144,6 +144,7 @@
; CHECK-NEXT: Machine Optimization Remark Emitter
; CHECK-NEXT: Shrink Wrapping analysis
; CHECK-NEXT: Prologue/Epilogue Insertion & Frame Finalization
+; CHECK-NEXT: Machine Late Instructions Cleanup Pass
; CHECK-NEXT: Control Flow Optimizer
; CHECK-NEXT: Lazy Machine Block Frequency Analysis
; CHECK-NEXT: Tail Duplication
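This pipeline listing comes from llc's -debug-pass=Structure output, so the new line can be reproduced by hand by running llc -O3 -debug-pass=Structure on a RISC-V triple. The pass sits between frame finalization and the Control Flow Optimizer, early enough that later passes such as branch relaxation see the shorter sequences.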
diff --git a/llvm/test/CodeGen/RISCV/branch-relaxation.ll b/llvm/test/CodeGen/RISCV/branch-relaxation.ll
index 95a72aafdca9b..cbe12187a4110 100644
--- a/llvm/test/CodeGen/RISCV/branch-relaxation.ll
+++ b/llvm/test/CodeGen/RISCV/branch-relaxation.ll
@@ -826,7 +826,6 @@ define void @relax_jal_spill_32_adjust_spill_slot() {
; CHECK-RV32-NEXT: #NO_APP
; CHECK-RV32-NEXT: lui a0, 2
; CHECK-RV32-NEXT: sub sp, s0, a0
-; CHECK-RV32-NEXT: lui a0, 2
; CHECK-RV32-NEXT: addi a0, a0, -2032
; CHECK-RV32-NEXT: add sp, sp, a0
; CHECK-RV32-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
@@ -1076,7 +1075,6 @@ define void @relax_jal_spill_32_adjust_spill_slot() {
; CHECK-RV64-NEXT: #NO_APP
; CHECK-RV64-NEXT: lui a0, 2
; CHECK-RV64-NEXT: sub sp, s0, a0
-; CHECK-RV64-NEXT: lui a0, 2
; CHECK-RV64-NEXT: addiw a0, a0, -2032
; CHECK-RV64-NEXT: add sp, sp, a0
; CHECK-RV64-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload
@@ -2323,7 +2321,6 @@ define void @relax_jal_spill_64_adjust_spill_slot() {
; CHECK-RV32-NEXT: #NO_APP
; CHECK-RV32-NEXT: lui a0, 2
; CHECK-RV32-NEXT: sub sp, s0, a0
-; CHECK-RV32-NEXT: lui a0, 2
; CHECK-RV32-NEXT: addi a0, a0, -2032
; CHECK-RV32-NEXT: add sp, sp, a0
; CHECK-RV32-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
@@ -2561,7 +2558,6 @@ define void @relax_jal_spill_64_adjust_spill_slot() {
; CHECK-RV64-NEXT: #NO_APP
; CHECK-RV64-NEXT: lui a0, 2
; CHECK-RV64-NEXT: sub sp, s0, a0
-; CHECK-RV64-NEXT: lui a0, 2
; CHECK-RV64-NEXT: addiw a0, a0, -2032
; CHECK-RV64-NEXT: add sp, sp, a0
; CHECK-RV64-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/out-of-reach-emergency-slot.mir b/llvm/test/CodeGen/RISCV/out-of-reach-emergency-slot.mir
index 9ada644e9da19..dbc25d3ee4352 100644
--- a/llvm/test/CodeGen/RISCV/out-of-reach-emergency-slot.mir
+++ b/llvm/test/CodeGen/RISCV/out-of-reach-emergency-slot.mir
@@ -43,7 +43,6 @@
; CHECK-NEXT: call foo@plt
; CHECK-NEXT: lui a0, 2
; CHECK-NEXT: sub sp, s0, a0
- ; CHECK-NEXT: lui a0, 2
; CHECK-NEXT: addiw a0, a0, -2032
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
index 8cd72ddb7e25d..463e4e5426d9f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
@@ -1394,7 +1394,6 @@ define <vscale x 8 x i64> @bitreverse_nxv8i64(<vscale x 8 x i64> %va) {
; RV32-NEXT: addi a5, sp, 16
; RV32-NEXT: vl8r.v v0, (a5) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v16, v16, v0
-; RV32-NEXT: addi a5, sp, 16
; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
; RV32-NEXT: vand.vx v0, v8, a2
; RV32-NEXT: vsll.vx v0, v0, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
index ec9792ce8089e..8dfda16f508a2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
@@ -2943,7 +2943,6 @@ define <vscale x 7 x i64> @vp_bitreverse_nxv7i64(<vscale x 7 x i64> %va, <vscale
; RV64-NEXT: addi a5, sp, 16
; RV64-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
; RV64-NEXT: vor.vv v16, v16, v24, v0.t
-; RV64-NEXT: addi a5, sp, 16
; RV64-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
; RV64-NEXT: vsrl.vx v24, v8, a2, v0.t
; RV64-NEXT: vsrl.vx v16, v8, a4, v0.t
@@ -3036,7 +3035,6 @@ define <vscale x 7 x i64> @vp_bitreverse_nxv7i64_unmasked(<vscale x 7 x i64> %va
; RV32-NEXT: addi a6, sp, 16
; RV32-NEXT: vl8r.v v0, (a6) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v24, v0, v24
-; RV32-NEXT: addi a6, sp, 16
; RV32-NEXT: vs8r.v v24, (a6) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vx v0, v8, a3
; RV32-NEXT: vand.vx v0, v0, a2
@@ -3297,7 +3295,6 @@ define <vscale x 8 x i64> @vp_bitreverse_nxv8i64(<vscale x 8 x i64> %va, <vscale
; RV64-NEXT: addi a5, sp, 16
; RV64-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
; RV64-NEXT: vor.vv v16, v16, v24, v0.t
-; RV64-NEXT: addi a5, sp, 16
; RV64-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
; RV64-NEXT: vsrl.vx v24, v8, a2, v0.t
; RV64-NEXT: vsrl.vx v16, v8, a4, v0.t
@@ -3390,7 +3387,6 @@ define <vscale x 8 x i64> @vp_bitreverse_nxv8i64_unmasked(<vscale x 8 x i64> %va
; RV32-NEXT: addi a6, sp, 16
; RV32-NEXT: vl8r.v v0, (a6) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v24, v0, v24
-; RV32-NEXT: addi a6, sp, 16
; RV32-NEXT: vs8r.v v24, (a6) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vx v0, v8, a3
; RV32-NEXT: vand.vx v0, v0, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll
index b20b24da0f5c4..c2c6e57785029 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll
@@ -530,7 +530,6 @@ define <vscale x 8 x i64> @bswap_nxv8i64(<vscale x 8 x i64> %va) {
; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vl8r.v v0, (a4) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v16, v16, v0
-; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
; RV32-NEXT: vand.vx v0, v8, a2
; RV32-NEXT: vsll.vx v0, v0, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
index eddfecad0c266..4bfd4cb39e959 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
@@ -1153,7 +1153,6 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64(<vscale x 7 x i64> %va, <vscale x 7
; RV64-NEXT: addi a5, sp, 16
; RV64-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
; RV64-NEXT: vor.vv v16, v16, v24, v0.t
-; RV64-NEXT: addi a5, sp, 16
; RV64-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
; RV64-NEXT: vsrl.vx v24, v8, a2, v0.t
; RV64-NEXT: vsrl.vx v16, v8, a4, v0.t
@@ -1213,7 +1212,6 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64_unmasked(<vscale x 7 x i64> %va, i32
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v24, v0, v24
-; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vx v0, v8, a3
; RV32-NEXT: vand.vx v0, v0, a2
@@ -1225,7 +1223,6 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64_unmasked(<vscale x 7 x i64> %va, i32
; RV32-NEXT: vand.vx v8, v8, a4
; RV32-NEXT: vor.vv v8, v16, v8
; RV32-NEXT: vor.vv v8, v8, v24
-; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v8, v16, v8
; RV32-NEXT: csrr a0, vlenb
@@ -1393,7 +1390,6 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
; RV64-NEXT: addi a5, sp, 16
; RV64-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
; RV64-NEXT: vor.vv v16, v16, v24, v0.t
-; RV64-NEXT: addi a5, sp, 16
; RV64-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
; RV64-NEXT: vsrl.vx v24, v8, a2, v0.t
; RV64-NEXT: vsrl.vx v16, v8, a4, v0.t
@@ -1453,7 +1449,6 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v24, v0, v24
-; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vx v0, v8, a3
; RV32-NEXT: vand.vx v0, v0, a2
@@ -1465,7 +1460,6 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32
; RV32-NEXT: vand.vx v8, v8, a4
; RV32-NEXT: vor.vv v8, v16, v8
; RV32-NEXT: vor.vv v8, v8, v24
-; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v8, v16, v8
; RV32-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
index b194fbfb1fe0e..b23ab9a50b5e5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
@@ -765,10 +765,8 @@ define <vscale x 16 x double> @vp_ceil_vv_nxv16f64(<vscale x 16 x double> %va, <
; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: bltu a0, a1, .LBB32_2
; CHECK-NEXT: # %bb.1:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
index 4c74ff4310a12..eb45b3cf33544 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
@@ -2338,7 +2338,6 @@ define <15 x i64> @vp_bitreverse_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroex
; RV64-NEXT: addi a5, sp, 16
; RV64-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
; RV64-NEXT: vor.vv v16, v16, v24, v0.t
-; RV64-NEXT: addi a5, sp, 16
; RV64-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
; RV64-NEXT: vsrl.vx v24, v8, a2, v0.t
; RV64-NEXT: vsrl.vx v16, v8, a4, v0.t
@@ -2422,7 +2421,6 @@ define <15 x i64> @vp_bitreverse_v15i64_unmasked(<15 x i64> %va, i32 zeroext %ev
; RV32-NEXT: addi a7, sp, 16
; RV32-NEXT: vl8r.v v0, (a7) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v24, v24, v0
-; RV32-NEXT: addi a7, sp, 16
; RV32-NEXT: vs8r.v v24, (a7) # Unknown-size Folded Spill
; RV32-NEXT: vand.vx v0, v8, a3
; RV32-NEXT: vsll.vx v0, v0, a2
@@ -2706,7 +2704,6 @@ define <16 x i64> @vp_bitreverse_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroex
; RV64-NEXT: addi a5, sp, 16
; RV64-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
; RV64-NEXT: vor.vv v16, v16, v24, v0.t
-; RV64-NEXT: addi a5, sp, 16
; RV64-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
; RV64-NEXT: vsrl.vx v24, v8, a2, v0.t
; RV64-NEXT: vsrl.vx v16, v8, a4, v0.t
@@ -2790,7 +2787,6 @@ define <16 x i64> @vp_bitreverse_v16i64_unmasked(<16 x i64> %va, i32 zeroext %ev
; RV32-NEXT: addi a7, sp, 16
; RV32-NEXT: vl8r.v v0, (a7) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v24, v24, v0
-; RV32-NEXT: addi a7, sp, 16
; RV32-NEXT: vs8r.v v24, (a7) # Unknown-size Folded Spill
; RV32-NEXT: vand.vx v0, v8, a3
; RV32-NEXT: vsll.vx v0, v0, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll
index fa480371d8920..9280565c3192e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll
@@ -1044,7 +1044,6 @@ define <15 x i64> @vp_bswap_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %ev
; RV64-NEXT: addi a5, sp, 16
; RV64-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
; RV64-NEXT: vor.vv v16, v16, v24, v0.t
-; RV64-NEXT: addi a5, sp, 16
; RV64-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
; RV64-NEXT: vsrl.vx v24, v8, a2, v0.t
; RV64-NEXT: vsrl.vx v16, v8, a4, v0.t
@@ -1107,7 +1106,6 @@ define <15 x i64> @vp_bswap_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vl8r.v v0, (a4) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v24, v24, v0
-; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill
; RV32-NEXT: vand.vx v0, v8, a3
; RV32-NEXT: vsll.vx v0, v0, a2
@@ -1314,7 +1312,6 @@ define <16 x i64> @vp_bswap_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %ev
; RV64-NEXT: addi a5, sp, 16
; RV64-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
; RV64-NEXT: vor.vv v16, v16, v24, v0.t
-; RV64-NEXT: addi a5, sp, 16
; RV64-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
; RV64-NEXT: vsrl.vx v24, v8, a2, v0.t
; RV64-NEXT: vsrl.vx v16, v8, a4, v0.t
@@ -1377,7 +1374,6 @@ define <16 x i64> @vp_bswap_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vl8r.v v0, (a4) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v24, v24, v0
-; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill
; RV32-NEXT: vand.vx v0, v8, a3
; RV32-NEXT: vsll.vx v0, v0, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
index 82deff3559155..d0c78e54d6b55 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
@@ -712,7 +712,6 @@ define <32 x double> @vp_ceil_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroex
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v1
-; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmflt.vf v1, v24, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 3
@@ -766,7 +765,6 @@ define <32 x double> @vp_ceil_v32f64_unmasked(<32 x double> %va, i32 zeroext %ev
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: addi a1, a0, -16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
index e0fb19ec9ce50..b2e708646a94e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
@@ -712,7 +712,6 @@ define <32 x double> @vp_floor_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v1
-; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmflt.vf v1, v24, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 2
@@ -766,7 +765,6 @@ define <32 x double> @vp_floor_v32f64_unmasked(<32 x double> %va, i32 zeroext %e
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: addi a1, a0, -16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
index 0c4e3f0779fec..ec78efdb692fd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
@@ -638,7 +638,6 @@ define <32 x double> @vp_rint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroex
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: addi a1, a0, -16
@@ -697,7 +696,6 @@ define <32 x double> @vp_rint_v32f64_unmasked(<32 x double> %va, i32 zeroext %ev
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: addi a1, a0, -16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
index a6f164347b9f8..9ddd9e095e58d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
@@ -712,7 +712,6 @@ define <32 x double> @vp_round_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v1
-; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmflt.vf v1, v24, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
@@ -766,7 +765,6 @@ define <32 x double> @vp_round_v32f64_unmasked(<32 x double> %va, i32 zeroext %e
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: addi a1, a0, -16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
index 19342f5ccc5ad..ec8e17a8a0fe8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
@@ -712,7 +712,6 @@ define <32 x double> @vp_roundeven_v32f64(<32 x double> %va, <32 x i1> %m, i32 z
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v1
-; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmflt.vf v1, v24, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
@@ -766,7 +765,6 @@ define <32 x double> @vp_roundeven_v32f64_unmasked(<32 x double> %va, i32 zeroex
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: addi a1, a0, -16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
index dede7f3040319..30c563ae975c5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
@@ -712,7 +712,6 @@ define <32 x double> @vp_roundtozero_v32f64(<32 x double> %va, <32 x i1> %m, i32
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v1
-; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmflt.vf v1, v24, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 1
@@ -766,7 +765,6 @@ define <32 x double> @vp_roundtozero_v32f64_unmasked(<32 x double> %va, i32 zero
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: addi a1, a0, -16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll
index 008d5cd851f18..09869b6d8bf1b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll
@@ -206,7 +206,6 @@ define <32 x double> @vfwadd_v32f32(<32 x float> *%x, <32 x float> *%y) {
; CHECK-NEXT: vslidedown.vi v0, v24, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT: vfwadd.vv v8, v16, v24
-; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfwadd.vv v16, v24, v0
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll
index 5bea56827beaa..4d1c777a7cf14 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll
@@ -206,7 +206,6 @@ define <32 x double> @vfwmul_v32f32(<32 x float> *%x, <32 x float> *%y) {
; CHECK-NEXT: vslidedown.vi v0, v24, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT: vfwmul.vv v8, v16, v24
-; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfwmul.vv v16, v24, v0
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll
index f88f5574e340c..1dbdefa6b8831 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll
@@ -206,7 +206,6 @@ define <32 x double> @vfwsub_v32f32(<32 x float> *%x, <32 x float> *%y) {
; CHECK-NEXT: vslidedown.vi v0, v24, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT: vfwsub.vv v8, v16, v24
-; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfwsub.vv v16, v24, v0
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
index 40671d4c934e9..489c5edd5117b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
@@ -2405,7 +2405,6 @@ define <32 x double> @vpgather_baseidx_sext_v32i32_v32f64(double* %base, <32 x i
; RV64-NEXT: vsext.vf2 v24, v8
; RV64-NEXT: vsll.vi v16, v0, 3
; RV64-NEXT: vsll.vi v8, v24, 3
-; RV64-NEXT: addi a2, sp, 16
; RV64-NEXT: vl1r.v v24, (a2) # Unknown-size Folded Reload
; RV64-NEXT: addi a2, a1, -16
; RV64-NEXT: sltu a3, a1, a2
@@ -2478,7 +2477,6 @@ define <32 x double> @vpgather_baseidx_zext_v32i32_v32f64(double* %base, <32 x i
; RV64-NEXT: vzext.vf2 v24, v8
; RV64-NEXT: vsll.vi v16, v0, 3
; RV64-NEXT: vsll.vi v8, v24, 3
-; RV64-NEXT: addi a2, sp, 16
; RV64-NEXT: vl1r.v v24, (a2) # Unknown-size Folded Reload
; RV64-NEXT: addi a2, a1, -16
; RV64-NEXT: sltu a3, a1, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
index 72013f55e2934..bdb4ba9b921b7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
@@ -333,7 +333,6 @@ define <32 x i64> @vwadd_v32i64(<32 x i32>* %x, <32 x i32>* %y) nounwind {
; CHECK-NEXT: vslidedown.vi v0, v24, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT: vwadd.vv v8, v16, v24
-; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwadd.vv v16, v24, v0
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
index 3f101533e5e5b..341c92a1f9e43 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
@@ -333,7 +333,6 @@ define <32 x i64> @vwaddu_v32i64(<32 x i32>* %x, <32 x i32>* %y) nounwind {
; CHECK-NEXT: vslidedown.vi v0, v24, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT: vwaddu.vv v8, v16, v24
-; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwaddu.vv v16, v24, v0
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
index 1d5f6c2ff2d8c..fbfe7fa60db21 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
@@ -363,7 +363,6 @@ define <32 x i64> @vwmul_v32i64(<32 x i32>* %x, <32 x i32>* %y) {
; CHECK-NEXT: vslidedown.vi v0, v24, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT: vwmul.vv v8, v16, v24
-; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwmul.vv v16, v24, v0
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
index fd298420c4a1f..09cec0ebd5e09 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
@@ -355,7 +355,6 @@ define <32 x i64> @vwmulsu_v32i64(<32 x i32>* %x, <32 x i32>* %y) {
; CHECK-NEXT: vslidedown.vi v0, v24, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT: vwmulsu.vv v8, v24, v16
-; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwmulsu.vv v16, v0, v24
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
index f3dab4778cc27..b55df5e1fb215 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
@@ -339,7 +339,6 @@ define <32 x i64> @vwmulu_v32i64(<32 x i32>* %x, <32 x i32>* %y) {
; CHECK-NEXT: vslidedown.vi v0, v24, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT: vwmulu.vv v8, v16, v24
-; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwmulu.vv v16, v24, v0
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll
index 0072a464985f6..0e5c2075ff3f5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll
@@ -333,7 +333,6 @@ define <32 x i64> @vwsub_v32i64(<32 x i32>* %x, <32 x i32>* %y) nounwind {
; CHECK-NEXT: vslidedown.vi v0, v24, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT: vwsub.vv v8, v16, v24
-; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwsub.vv v16, v24, v0
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
index d2e9ca86e844b..2887c74c3068c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
@@ -333,7 +333,6 @@ define <32 x i64> @vwsubu_v32i64(<32 x i32>* %x, <32 x i32>* %y) nounwind {
; CHECK-NEXT: vslidedown.vi v0, v24, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT: vwsubu.vv v8, v16, v24
-; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwsubu.vv v16, v24, v0
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
index f5d3d92d77d9e..7ba58716cdde3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
@@ -765,10 +765,8 @@ define <vscale x 16 x double> @vp_floor_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: bltu a0, a1, .LBB32_2
; CHECK-NEXT: # %bb.1:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
index 867e6b8a2f704..fe897f52c2308 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
@@ -225,7 +225,6 @@ define <vscale x 64 x i8> @fshr_v64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8>
; CHECK-NEXT: vand.vi v8, v8, 7, v0.t
; CHECK-NEXT: vsll.vv v8, v16, v8, v0.t
; CHECK-NEXT: vand.vi v16, v24, 7, v0.t
-; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vsrl.vv v16, v24, v16, v0.t
; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
@@ -257,7 +256,6 @@ define <vscale x 64 x i8> @fshl_v64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8>
; CHECK-NEXT: vand.vi v8, v8, 7, v0.t
; CHECK-NEXT: vsrl.vv v8, v16, v8, v0.t
; CHECK-NEXT: vand.vi v16, v24, 7, v0.t
-; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vsll.vv v16, v24, v16, v0.t
; CHECK-NEXT: vor.vv v8, v16, v8, v0.t
@@ -459,7 +457,6 @@ define <vscale x 32 x i16> @fshr_v32i16(<vscale x 32 x i16> %a, <vscale x 32 x i
; CHECK-NEXT: vand.vi v8, v8, 15, v0.t
; CHECK-NEXT: vsll.vv v8, v16, v8, v0.t
; CHECK-NEXT: vand.vi v16, v24, 15, v0.t
-; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vsrl.vv v16, v24, v16, v0.t
; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
@@ -491,7 +488,6 @@ define <vscale x 32 x i16> @fshl_v32i16(<vscale x 32 x i16> %a, <vscale x 32 x i
; CHECK-NEXT: vand.vi v8, v8, 15, v0.t
; CHECK-NEXT: vsrl.vv v8, v16, v8, v0.t
; CHECK-NEXT: vand.vi v16, v24, 15, v0.t
-; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vsll.vv v16, v24, v16, v0.t
; CHECK-NEXT: vor.vv v8, v16, v8, v0.t
@@ -1082,7 +1078,6 @@ define <vscale x 16 x i64> @fshr_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
; CHECK-NEXT: .LBB46_2:
; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: li a2, 48
; CHECK-NEXT: mul a1, a1, a2
; CHECK-NEXT: add a1, sp, a1
@@ -1269,7 +1264,6 @@ define <vscale x 16 x i64> @fshl_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
; CHECK-NEXT: .LBB47_2:
; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: li a2, 48
; CHECK-NEXT: mul a1, a1, a2
; CHECK-NEXT: add a1, sp, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
index 42d6700699d65..fe10ee71da005 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
@@ -699,7 +699,6 @@ define <vscale x 16 x double> @vp_rint_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB32_2
diff --git a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
index 959989bf4c47d..df5c1ef611580 100644
--- a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
@@ -765,10 +765,8 @@ define <vscale x 16 x double> @vp_round_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: bltu a0, a1, .LBB32_2
; CHECK-NEXT: # %bb.1:
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
index 2b323757e1674..4d81eaadda359 100644
--- a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
@@ -765,10 +765,8 @@ define <vscale x 16 x double> @vp_roundeven_nxv16f64(<vscale x 16 x double> %va,
; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: bltu a0, a1, .LBB32_2
; CHECK-NEXT: # %bb.1:
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
index 5857290d19399..353a580aa3ce2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
@@ -765,10 +765,8 @@ define <vscale x 16 x double> @vp_roundtozero_nxv16f64(<vscale x 16 x double> %v
; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: bltu a0, a1, .LBB32_2
; CHECK-NEXT: # %bb.1:
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector.ll
index 840a796de88ff..1e3b90e306c83 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector.ll
@@ -33,7 +33,6 @@ define <vscale x 1 x i32> @spill_lmul_mf2(<vscale x 1 x i32> %va) nounwind {
; SPILL-O2-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
-; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 1
@@ -76,7 +75,6 @@ define <vscale x 2 x i32> @spill_lmul_1(<vscale x 2 x i32> %va) nounwind {
; SPILL-O2-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
-; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 1
@@ -119,7 +117,6 @@ define <vscale x 4 x i32> @spill_lmul_2(<vscale x 4 x i32> %va) nounwind {
; SPILL-O2-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
-; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 1
@@ -162,7 +159,6 @@ define <vscale x 8 x i32> @spill_lmul_4(<vscale x 8 x i32> %va) nounwind {
; SPILL-O2-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
-; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 2
@@ -205,7 +201,6 @@ define <vscale x 16 x i32> @spill_lmul_8(<vscale x 16 x i32> %va) nounwind {
; SPILL-O2-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
-; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll
index 92bd1e92cedb8..e7e75ad8cc00c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll
@@ -42,7 +42,6 @@ define <vscale x 1 x i32> @spill_zvlsseg_nxv1i32(i32* %base, i32 %vl) nounwind {
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
; SPILL-O2-NEXT: addi a0, sp, 16
-; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
@@ -98,7 +97,6 @@ define <vscale x 2 x i32> @spill_zvlsseg_nxv2i32(i32* %base, i32 %vl) nounwind {
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
; SPILL-O2-NEXT: addi a0, sp, 16
-; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector.ll
index ffc5413322bc2..30075c2dad516 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector.ll
@@ -33,7 +33,6 @@ define <vscale x 1 x i64> @spill_lmul_1(<vscale x 1 x i64> %va) nounwind {
; SPILL-O2-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
-; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 1
@@ -76,7 +75,6 @@ define <vscale x 2 x i64> @spill_lmul_2(<vscale x 2 x i64> %va) nounwind {
; SPILL-O2-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
-; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 1
@@ -119,7 +117,6 @@ define <vscale x 4 x i64> @spill_lmul_4(<vscale x 4 x i64> %va) nounwind {
; SPILL-O2-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
-; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 2
@@ -162,7 +159,6 @@ define <vscale x 8 x i64> @spill_lmul_8(<vscale x 8 x i64> %va) nounwind {
; SPILL-O2-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
-; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll
index a6afa45cdef63..b834268d5ccf2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll
@@ -42,7 +42,6 @@ define <vscale x 1 x i32> @spill_zvlsseg_nxv1i32(i32* %base, i64 %vl) nounwind {
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
; SPILL-O2-NEXT: addi a0, sp, 16
-; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
@@ -98,7 +97,6 @@ define <vscale x 2 x i32> @spill_zvlsseg_nxv2i32(i32* %base, i64 %vl) nounwind {
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
; SPILL-O2-NEXT: addi a0, sp, 16
-; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/stack-realignment.ll b/llvm/test/CodeGen/RISCV/stack-realignment.ll
index e5ddb23270951..e7f053f995eed 100644
--- a/llvm/test/CodeGen/RISCV/stack-realignment.ll
+++ b/llvm/test/CodeGen/RISCV/stack-realignment.ll
@@ -547,7 +547,6 @@ define void @caller4096() {
; RV32I-NEXT: call callee@plt
; RV32I-NEXT: lui a0, 2
; RV32I-NEXT: sub sp, s0, a0
-; RV32I-NEXT: lui a0, 2
; RV32I-NEXT: addi a0, a0, -2032
; RV32I-NEXT: add sp, sp, a0
; RV32I-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
@@ -575,7 +574,6 @@ define void @caller4096() {
; RV64I-NEXT: call callee@plt
; RV64I-NEXT: lui a0, 2
; RV64I-NEXT: sub sp, s0, a0
-; RV64I-NEXT: lui a0, 2
; RV64I-NEXT: addiw a0, a0, -2032
; RV64I-NEXT: add sp, sp, a0
; RV64I-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload