[llvm] [RISCV] Enable rematerialization for scalar loads (PR #166774)

Luke Lau via llvm-commits llvm-commits at lists.llvm.org
Mon Nov 17 22:57:41 PST 2025


https://github.com/lukel97 updated https://github.com/llvm/llvm-project/pull/166774

>From bc86a8f56a48de75c969bee4dfd3e8a8940e0209 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Mon, 20 Oct 2025 18:06:11 +0800
Subject: [PATCH 1/5] Precommit tests

---
 llvm/test/CodeGen/RISCV/remat.ll | 176 ++++++++++++++++++++++++++++++-
 1 file changed, 175 insertions(+), 1 deletion(-)

diff --git a/llvm/test/CodeGen/RISCV/remat.ll b/llvm/test/CodeGen/RISCV/remat.ll
index 8490dd0877d30..ffd58070f36aa 100644
--- a/llvm/test/CodeGen/RISCV/remat.ll
+++ b/llvm/test/CodeGen/RISCV/remat.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -O1 -mtriple=riscv64 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -O1 -mtriple=riscv64 -mattr=+d,+zfh,+zfbfmin -verify-machineinstrs < %s | FileCheck %s
 
 @a = common global i32 0, align 4
 @l = common global i32 0, align 4
@@ -200,3 +200,177 @@ for.end:                                          ; preds = %for.inc, %entry
 }
 
 declare i32 @foo(i32, i32, i32, i32, i32, i32)
+
+define void @remat_load(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, double %8, double %9, double %10, double %11, double %12, double %13, double %14, double %15, i8 %stackarg0, i16 %stackarg1, i32 %stackarg2, i64 %stackarg3, half %stackarg4, bfloat %stackarg5, float %stackarg6, double %stackarg7, ptr %p) nounwind {
+; CHECK-LABEL: remat_load:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -256
+; CHECK-NEXT:    sd ra, 248(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s0, 240(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s1, 232(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s2, 224(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s3, 216(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s4, 208(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s5, 200(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s6, 192(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s7, 184(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s8, 176(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s9, 168(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s10, 160(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s11, 152(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    fsd fs0, 144(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    fsd fs1, 136(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    fsd fs2, 128(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    fsd fs3, 120(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    fsd fs4, 112(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    fsd fs5, 104(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    fsd fs6, 96(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    fsd fs7, 88(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    fsd fs8, 80(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    fsd fs9, 72(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    fsd fs10, 64(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    fsd fs11, 56(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    fld fa5, 312(sp)
+; CHECK-NEXT:    fsd fa5, 48(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    flw fa4, 304(sp)
+; CHECK-NEXT:    fsw fa4, 44(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    flh fa3, 296(sp)
+; CHECK-NEXT:    fsh fa3, 42(sp) # 2-byte Folded Spill
+; CHECK-NEXT:    flh fa2, 288(sp)
+; CHECK-NEXT:    fsh fa2, 40(sp) # 2-byte Folded Spill
+; CHECK-NEXT:    ld a0, 320(sp)
+; CHECK-NEXT:    sd a0, 0(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    lbu a4, 256(sp)
+; CHECK-NEXT:    sd a4, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    lh a3, 264(sp)
+; CHECK-NEXT:    sd a3, 16(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    lw a2, 272(sp)
+; CHECK-NEXT:    sd a2, 24(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    ld a1, 280(sp)
+; CHECK-NEXT:    sd a1, 32(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sb a4, 0(a0)
+; CHECK-NEXT:    sh a3, 0(a0)
+; CHECK-NEXT:    sw a2, 0(a0)
+; CHECK-NEXT:    sd a1, 0(a0)
+; CHECK-NEXT:    fsh fa2, 0(a0)
+; CHECK-NEXT:    fsh fa3, 0(a0)
+; CHECK-NEXT:    fsw fa4, 0(a0)
+; CHECK-NEXT:    fsd fa5, 0(a0)
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ld a0, 0(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld a1, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    sb a1, 0(a0)
+; CHECK-NEXT:    ld a1, 16(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    sh a1, 0(a0)
+; CHECK-NEXT:    ld a1, 24(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    sw a1, 0(a0)
+; CHECK-NEXT:    ld a1, 32(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    sd a1, 0(a0)
+; CHECK-NEXT:    flh fa5, 40(sp) # 2-byte Folded Reload
+; CHECK-NEXT:    fsh fa5, 0(a0)
+; CHECK-NEXT:    flh fa5, 42(sp) # 2-byte Folded Reload
+; CHECK-NEXT:    fsh fa5, 0(a0)
+; CHECK-NEXT:    flw fa5, 44(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    fsw fa5, 0(a0)
+; CHECK-NEXT:    fld fa5, 48(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    fsd fa5, 0(a0)
+; CHECK-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s1, 232(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s2, 224(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s3, 216(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s4, 208(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s5, 200(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s6, 192(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s7, 184(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s8, 176(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s9, 168(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s10, 160(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s11, 152(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    fld fs0, 144(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    fld fs1, 136(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    fld fs2, 128(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    fld fs3, 120(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    fld fs4, 112(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    fld fs5, 104(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    fld fs6, 96(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    fld fs7, 88(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    fld fs8, 80(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    fld fs9, 72(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    fld fs10, 64(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    fld fs11, 56(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 256
+; CHECK-NEXT:    ret
+entry:
+  ; Force loading the stack arguments to create their live interval
+  store volatile i8 %stackarg0, ptr %p
+  store volatile i16 %stackarg1, ptr %p
+  store volatile i32 %stackarg2, ptr %p
+  store volatile i64 %stackarg3, ptr %p
+  store volatile half %stackarg4, ptr %p
+  store volatile bfloat %stackarg5, ptr %p
+  store volatile float %stackarg6, ptr %p
+  store volatile double %stackarg7, ptr %p
+  tail call void asm sideeffect "", "~{x1},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x16},~{x17},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{x29},~{x30},~{x31},~{f0},~{f1},~{f2},~{f3},~{f4},~{f5},~{f6},~{f7},~{f8},~{f9},~{f10},~{f11},~{f12},~{f13},~{f14},~{f15},~{f16},~{f17},~{f18},~{f19},~{f20},~{f21},~{f22},~{f23},~{f24},~{f25},~{f26},~{f27},~{f28},~{f29},~{f30},~{f31}"()
+  ; Now use them after spilling everything to force rematerialization
+  store volatile i8 %stackarg0, ptr %p
+  store volatile i16 %stackarg1, ptr %p
+  store volatile i32 %stackarg2, ptr %p
+  store volatile i64 %stackarg3, ptr %p
+  store volatile half %stackarg4, ptr %p
+  store volatile bfloat %stackarg5, ptr %p
+  store volatile float %stackarg6, ptr %p
+  store volatile double %stackarg7, ptr %p
+  ret void
+}
+
+; We could remat the load of the constant global if we extended the live
+; interval of the high bits of the address.
+
+@const = constant i32 42
+define i32 @constglobal_load() nounwind {
+; CHECK-LABEL: constglobal_load:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -112
+; CHECK-NEXT:    sd ra, 104(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s0, 96(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s1, 88(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s2, 80(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s3, 72(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s4, 64(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s5, 56(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s6, 48(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s7, 40(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s8, 32(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s9, 24(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s10, 16(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s11, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    lui a0, %hi(const)
+; CHECK-NEXT:    lw a0, %lo(const)(a0)
+; CHECK-NEXT:    sd a0, 0(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ld a0, 0(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addiw a0, a0, 1
+; CHECK-NEXT:    ld ra, 104(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s0, 96(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s1, 88(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s2, 80(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s3, 72(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s4, 64(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s5, 56(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s6, 48(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s7, 40(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s8, 32(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s9, 24(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s10, 16(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s11, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 112
+; CHECK-NEXT:    ret
+entry:
+  %global = load i32, ptr @const
+  tail call void asm sideeffect "", "~{x1},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x16},~{x17},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{x29},~{x30},~{x31}"()
+  %a = add i32 %global, 1
+  ret i32 %a
+}

>From 2b0f29fc4fab4d60948a380b842c2e213d0594c3 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Mon, 20 Oct 2025 19:04:28 +0800
Subject: [PATCH 2/5] [RISCV] Enable rematerialization for scalar loads

---
 llvm/lib/Target/RISCV/RISCVInstrInfo.td    |   4 +-
 llvm/lib/Target/RISCV/RISCVInstrInfoD.td   |   2 +-
 llvm/lib/Target/RISCV/RISCVInstrInfoF.td   |   2 +-
 llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td |   2 +-
 llvm/test/CodeGen/RISCV/remat.ll           | 149 ++++++++++-----------
 llvm/test/CodeGen/RISCV/rvv/pr95865.ll     |   4 +-
 6 files changed, 76 insertions(+), 87 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 9cb53fb27a2d2..84b962b2a8607 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -768,7 +768,7 @@ def BGE  : BranchCC_rri<0b101, "bge">;
 def BLTU : BranchCC_rri<0b110, "bltu">;
 def BGEU : BranchCC_rri<0b111, "bgeu">;
 
-let IsSignExtendingOpW = 1, canFoldAsLoad = 1 in {
+let IsSignExtendingOpW = 1, canFoldAsLoad = 1, isReMaterializable = 1 in {
 def LB  : Load_ri<0b000, "lb">, Sched<[WriteLDB, ReadMemBase]>;
 def LH  : Load_ri<0b001, "lh">, Sched<[WriteLDH, ReadMemBase]>;
 def LW  : Load_ri<0b010, "lw">, Sched<[WriteLDW, ReadMemBase]>;
@@ -889,7 +889,7 @@ def CSRRCI : CSR_ii<0b111, "csrrci">;
 /// RV64I instructions
 
 let Predicates = [IsRV64] in {
-let canFoldAsLoad = 1 in {
+let canFoldAsLoad = 1, isReMaterializable = 1 in {
 def LWU   : Load_ri<0b110, "lwu">, Sched<[WriteLDW, ReadMemBase]>;
 def LD    : Load_ri<0b011, "ld">, Sched<[WriteLDD, ReadMemBase]>;
 }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
index 4ffe3e62ac501..deacd41e6469a 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
@@ -71,7 +71,7 @@ defvar DExtsRV64 = [DExt, ZdinxExt];
 //===----------------------------------------------------------------------===//
 
 let Predicates = [HasStdExtD] in {
-let canFoldAsLoad = 1 in
+let canFoldAsLoad = 1, isReMaterializable = 1 in
 def FLD : FPLoad_r<0b011, "fld", FPR64, WriteFLD64>;
 
 // Operands for stores are in the order srcreg, base, offset rather than
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
index b30f8ec820c15..bd191001b75ec 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
@@ -330,7 +330,7 @@ class PseudoFROUND<DAGOperand Ty, ValueType vt, ValueType intvt = XLenVT>
 //===----------------------------------------------------------------------===//
 
 let Predicates = [HasStdExtF] in {
-let canFoldAsLoad = 1 in
+let canFoldAsLoad = 1, isReMaterializable = 1 in
 def FLW : FPLoad_r<0b010, "flw", FPR32, WriteFLD32>;
 
 // Operands for stores are in the order srcreg, base, offset rather than
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
index 1c6a5afcda49b..c172d1739ba61 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
@@ -90,7 +90,7 @@ defvar ZfhminDExts = [ZfhminDExt, ZhinxminZdinxExt, ZhinxminZdinx32Ext];
 //===----------------------------------------------------------------------===//
 
 let Predicates = [HasHalfFPLoadStoreMove] in {
-let canFoldAsLoad = 1 in
+let canFoldAsLoad = 1, isReMaterializable = 1 in
 def FLH : FPLoad_r<0b001, "flh", FPR16, WriteFLD16>;
 
 // Operands for stores are in the order srcreg, base, offset rather than
diff --git a/llvm/test/CodeGen/RISCV/remat.ll b/llvm/test/CodeGen/RISCV/remat.ll
index ffd58070f36aa..532f833ed70c8 100644
--- a/llvm/test/CodeGen/RISCV/remat.ll
+++ b/llvm/test/CodeGen/RISCV/remat.ll
@@ -204,50 +204,41 @@ declare i32 @foo(i32, i32, i32, i32, i32, i32)
 define void @remat_load(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, double %8, double %9, double %10, double %11, double %12, double %13, double %14, double %15, i8 %stackarg0, i16 %stackarg1, i32 %stackarg2, i64 %stackarg3, half %stackarg4, bfloat %stackarg5, float %stackarg6, double %stackarg7, ptr %p) nounwind {
 ; CHECK-LABEL: remat_load:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -256
-; CHECK-NEXT:    sd ra, 248(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    sd s0, 240(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    sd s1, 232(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    sd s2, 224(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    sd s3, 216(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    sd s4, 208(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    sd s5, 200(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    sd s6, 192(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    sd s7, 184(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    sd s8, 176(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    sd s9, 168(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    sd s10, 160(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    sd s11, 152(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    fsd fs0, 144(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    fsd fs1, 136(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    fsd fs2, 128(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    fsd fs3, 120(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    fsd fs4, 112(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    fsd fs5, 104(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    fsd fs6, 96(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    fsd fs7, 88(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    fsd fs8, 80(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    fsd fs9, 72(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    fsd fs10, 64(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    fsd fs11, 56(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    fld fa5, 312(sp)
-; CHECK-NEXT:    fsd fa5, 48(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    flw fa4, 304(sp)
-; CHECK-NEXT:    fsw fa4, 44(sp) # 4-byte Folded Spill
-; CHECK-NEXT:    flh fa3, 296(sp)
-; CHECK-NEXT:    fsh fa3, 42(sp) # 2-byte Folded Spill
-; CHECK-NEXT:    flh fa2, 288(sp)
-; CHECK-NEXT:    fsh fa2, 40(sp) # 2-byte Folded Spill
-; CHECK-NEXT:    ld a0, 320(sp)
-; CHECK-NEXT:    sd a0, 0(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    lbu a4, 256(sp)
-; CHECK-NEXT:    sd a4, 8(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    lh a3, 264(sp)
-; CHECK-NEXT:    sd a3, 16(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    lw a2, 272(sp)
-; CHECK-NEXT:    sd a2, 24(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    ld a1, 280(sp)
-; CHECK-NEXT:    sd a1, 32(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    addi sp, sp, -208
+; CHECK-NEXT:    sd ra, 200(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s0, 192(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s1, 184(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s2, 176(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s3, 168(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s4, 160(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s5, 152(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s6, 144(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s7, 136(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s8, 128(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s9, 120(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s10, 112(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s11, 104(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    fsd fs0, 96(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    fsd fs1, 88(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    fsd fs2, 80(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    fsd fs3, 72(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    fsd fs4, 64(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    fsd fs5, 56(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    fsd fs6, 48(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    fsd fs7, 40(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    fsd fs8, 32(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    fsd fs9, 24(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    fsd fs10, 16(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    fsd fs11, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    fld fa5, 264(sp)
+; CHECK-NEXT:    flw fa4, 256(sp)
+; CHECK-NEXT:    flh fa3, 248(sp)
+; CHECK-NEXT:    flh fa2, 240(sp)
+; CHECK-NEXT:    ld a0, 272(sp)
+; CHECK-NEXT:    lbu a4, 208(sp)
+; CHECK-NEXT:    lh a3, 216(sp)
+; CHECK-NEXT:    lw a2, 224(sp)
+; CHECK-NEXT:    ld a1, 232(sp)
 ; CHECK-NEXT:    sb a4, 0(a0)
 ; CHECK-NEXT:    sh a3, 0(a0)
 ; CHECK-NEXT:    sw a2, 0(a0)
@@ -258,49 +249,49 @@ define void @remat_load(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6,
 ; CHECK-NEXT:    fsd fa5, 0(a0)
 ; CHECK-NEXT:    #APP
 ; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:    ld a0, 0(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    ld a1, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld a0, 272(sp)
+; CHECK-NEXT:    lbu a1, 208(sp)
 ; CHECK-NEXT:    sb a1, 0(a0)
-; CHECK-NEXT:    ld a1, 16(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    lh a1, 216(sp)
 ; CHECK-NEXT:    sh a1, 0(a0)
-; CHECK-NEXT:    ld a1, 24(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    lw a1, 224(sp)
 ; CHECK-NEXT:    sw a1, 0(a0)
-; CHECK-NEXT:    ld a1, 32(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld a1, 232(sp)
 ; CHECK-NEXT:    sd a1, 0(a0)
-; CHECK-NEXT:    flh fa5, 40(sp) # 2-byte Folded Reload
+; CHECK-NEXT:    flh fa5, 240(sp)
 ; CHECK-NEXT:    fsh fa5, 0(a0)
-; CHECK-NEXT:    flh fa5, 42(sp) # 2-byte Folded Reload
+; CHECK-NEXT:    flh fa5, 248(sp)
 ; CHECK-NEXT:    fsh fa5, 0(a0)
-; CHECK-NEXT:    flw fa5, 44(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    flw fa5, 256(sp)
 ; CHECK-NEXT:    fsw fa5, 0(a0)
-; CHECK-NEXT:    fld fa5, 48(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    fld fa5, 264(sp)
 ; CHECK-NEXT:    fsd fa5, 0(a0)
-; CHECK-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    ld s1, 232(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    ld s2, 224(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    ld s3, 216(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    ld s4, 208(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    ld s5, 200(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    ld s6, 192(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    ld s7, 184(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    ld s8, 176(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    ld s9, 168(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    ld s10, 160(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    ld s11, 152(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    fld fs0, 144(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    fld fs1, 136(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    fld fs2, 128(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    fld fs3, 120(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    fld fs4, 112(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    fld fs5, 104(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    fld fs6, 96(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    fld fs7, 88(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    fld fs8, 80(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    fld fs9, 72(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    fld fs10, 64(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    fld fs11, 56(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    addi sp, sp, 256
+; CHECK-NEXT:    ld ra, 200(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s0, 192(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s1, 184(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s2, 176(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s3, 168(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s4, 160(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s5, 152(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s6, 144(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s7, 136(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s8, 128(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s9, 120(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s10, 112(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s11, 104(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    fld fs0, 96(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    fld fs1, 88(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    fld fs2, 80(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    fld fs3, 72(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    fld fs4, 64(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    fld fs5, 56(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    fld fs6, 48(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    fld fs7, 40(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    fld fs8, 32(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    fld fs9, 24(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    fld fs10, 16(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    fld fs11, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 208
 ; CHECK-NEXT:    ret
 entry:
   ; Force loading the stack arguments to create their live interval
diff --git a/llvm/test/CodeGen/RISCV/rvv/pr95865.ll b/llvm/test/CodeGen/RISCV/rvv/pr95865.ll
index ab9849631663c..01d66b344ec2e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/pr95865.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/pr95865.ll
@@ -40,8 +40,6 @@ define i32 @main(i1 %arg.1, i64 %arg.2, i1 %arg.3, i64 %arg.4, i1 %arg.5, <vscal
 ; CHECK-NEXT:    li t0, 12
 ; CHECK-NEXT:    li s0, 4
 ; CHECK-NEXT:    li t1, 20
-; CHECK-NEXT:    ld a1, 112(sp)
-; CHECK-NEXT:    sd a1, 0(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    andi t3, a4, 1
@@ -142,7 +140,7 @@ define i32 @main(i1 %arg.1, i64 %arg.2, i1 %arg.3, i64 %arg.4, i1 %arg.5, <vscal
 ; CHECK-NEXT:    j .LBB0_11
 ; CHECK-NEXT:  .LBB0_12: # %for.body7.us.19
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
-; CHECK-NEXT:    ld a0, 0(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld a0, 112(sp)
 ; CHECK-NEXT:    vmv.s.x v16, a0
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, ma

>From 2e9024a6d6fd410e7aada7f2ffd9eecf2c57ed63 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Mon, 10 Nov 2025 12:29:26 +0800
Subject: [PATCH 3/5] Clarify why we need the volatile stores

---
 llvm/test/CodeGen/RISCV/remat.ll | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/llvm/test/CodeGen/RISCV/remat.ll b/llvm/test/CodeGen/RISCV/remat.ll
index 532f833ed70c8..b893053033007 100644
--- a/llvm/test/CodeGen/RISCV/remat.ll
+++ b/llvm/test/CodeGen/RISCV/remat.ll
@@ -294,7 +294,8 @@ define void @remat_load(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6,
 ; CHECK-NEXT:    addi sp, sp, 208
 ; CHECK-NEXT:    ret
 entry:
-  ; Force loading the stack arguments to create their live interval
+  ; Add a use of the stack arguments here so that we will have to load them from
+  ; the stack before the inline asm
   store volatile i8 %stackarg0, ptr %p
   store volatile i16 %stackarg1, ptr %p
   store volatile i32 %stackarg2, ptr %p

>From 86ddeeb8c24c574dcb38a22450d4eb6d1cc3a395 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Tue, 18 Nov 2025 14:56:21 +0800
Subject: [PATCH 4/5] Change to external constant

---
 llvm/test/CodeGen/RISCV/remat.ll | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/test/CodeGen/RISCV/remat.ll b/llvm/test/CodeGen/RISCV/remat.ll
index b893053033007..8397b298a578f 100644
--- a/llvm/test/CodeGen/RISCV/remat.ll
+++ b/llvm/test/CodeGen/RISCV/remat.ll
@@ -320,7 +320,7 @@ entry:
 ; We could remat the load of the constant global if we extended the live
 ; interval of the high bits of the address.
 
-@const = constant i32 42
+@const = external constant i32
 define i32 @constglobal_load() nounwind {
 ; CHECK-LABEL: constglobal_load:
 ; CHECK:       # %bb.0: # %entry

>From a80772a8f89f7826b4e824cd5e779feaf30f745d Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Tue, 18 Nov 2025 14:57:19 +0800
Subject: [PATCH 5/5] Adjust comment

---
 llvm/test/CodeGen/RISCV/remat.ll | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/llvm/test/CodeGen/RISCV/remat.ll b/llvm/test/CodeGen/RISCV/remat.ll
index 8397b298a578f..8a252751165d0 100644
--- a/llvm/test/CodeGen/RISCV/remat.ll
+++ b/llvm/test/CodeGen/RISCV/remat.ll
@@ -295,7 +295,8 @@ define void @remat_load(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6,
 ; CHECK-NEXT:    ret
 entry:
   ; Add a use of the stack arguments here so that we will have to load them from
-  ; the stack before the inline asm
+  ; the stack before the inline asm. Otherwise we would be exercising the
+  ; machine scheduler, not rematerialization.
   store volatile i8 %stackarg0, ptr %p
   store volatile i16 %stackarg1, ptr %p
   store volatile i32 %stackarg2, ptr %p



More information about the llvm-commits mailing list