[llvm] [SystemZ] Enable rematerialization for scalar loads (PR #179838)

via llvm-commits llvm-commits@lists.llvm.org
Wed Feb 4 17:58:31 PST 2026


https://github.com/anoopkg6 created https://github.com/llvm/llvm-project/pull/179838

We can avoid an unnecessary spill by marking these loads as rematerializable and simply reloading the value from where the argument was originally passed on the stack. TargetInstrInfo::isReMaterializableImpl checks that any load satisfies MI.isDereferenceableInvariantLoad(), so it is safe to move the load down to the remat site.
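
For context, the generic legality check that gates this is roughly the following; this is a paraphrased sketch of TargetInstrInfo::isReMaterializableImpl from llvm/lib/CodeGen/TargetInstrInfo.cpp, not the verbatim source:

```cpp
// Paraphrased sketch of the generic remat gate (not the exact LLVM source).
bool TargetInstrInfo::isReMaterializableImpl(const MachineInstr &MI) const {
  // Instructions that store or have unmodeled side effects can never be
  // rematerialized.
  if (MI.isNotDuplicable() || MI.mayStore() || MI.hasUnmodeledSideEffects())
    return false;

  // A load is only allowed if it reads dereferenceable, invariant memory:
  // the loaded value is then the same at the original site and at the remat
  // site, which is what makes sinking the load to its use safe.
  if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad())
    return false;

  // (Plus operand checks: every register use must be a constant physical
  // register or a virtual register, so the address is still available at
  // the remat point.)
  return true;
}
```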

Related: [#166774](https://github.com/llvm/llvm-project/pull/166774)
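
As an illustration, in the builtin-setjmp-spills.ll diff below, a GOT address that was previously spilled before the setjmp and reloaded after it is now simply recomputed at its use (assembly excerpted from this patch):

```
; Before:                                 ; After:
  stg %r1, 312(%r15)  # 8-byte Spill
  ...                                       ...
  lg  %r1, 312(%r15)  # 8-byte Reload       lgrl %r1, t@GOT
  a   %r0, 0(%r1)                           a    %r0, 0(%r1)
```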

From ca2dc9dfca39cf05e5c39dfc7405fd68a490cfbe Mon Sep 17 00:00:00 2001
From: anoopkg6 <anoopkg6@github.com>
Date: Thu, 5 Feb 2026 02:31:16 +0100
Subject: [PATCH] [SystemZ] Enable rematerialization for scalar loads as in
 #166774. This avoids unnecessary spills by marking loads as rematerializable
 and reloading directly from where the argument was originally passed on the
 stack, moving the load down to the remat site. Still needs a measurement on
 the performance machine.

---
 llvm/lib/Target/SystemZ/SystemZInstrFP.td     |  32 ++---
 llvm/lib/Target/SystemZ/SystemZInstrInfo.td   |  28 ++--
 llvm/lib/Target/SystemZ/SystemZInstrVector.td |   8 +-
 .../CodeGen/SystemZ/builtin-setjmp-spills.ll  |  98 ++++++--------
 llvm/test/CodeGen/SystemZ/fp-move-02.ll       |  26 ++--
 llvm/test/CodeGen/SystemZ/remat.ll            | 127 ++++++++++++++++++
 6 files changed, 216 insertions(+), 103 deletions(-)
 create mode 100644 llvm/test/CodeGen/SystemZ/remat.ll

diff --git a/llvm/lib/Target/SystemZ/SystemZInstrFP.td b/llvm/lib/Target/SystemZ/SystemZInstrFP.td
index 33f73bc658b25..dad603b2c347d 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrFP.td
+++ b/llvm/lib/Target/SystemZ/SystemZInstrFP.td
@@ -157,21 +157,23 @@ defm LoadStoreF128 : MVCLoadStore<load, f128, MVCImm, 15>;
 //===----------------------------------------------------------------------===//
 
 let canFoldAsLoad = 1, SimpleBDXLoad = 1, mayLoad = 1 in {
-  let isCodeGenOnly = 1 in
-    // Reload f16 from 4-byte spill slot.
-    defm LE16 : UnaryRXPair<"le", 0x78, 0xED64, z_load, FP16, 4>;
-  defm LE : UnaryRXPair<"le", 0x78, 0xED64, z_load, FP32, 4>;
-  defm LD : UnaryRXPair<"ld", 0x68, 0xED65, z_load, FP64, 8>;
-
-  // For z13 we prefer LDE over LE to avoid partial register dependencies.
-  let isCodeGenOnly = 1 in
-    def LDE32 : UnaryRXE<"lde", 0xED24, null_frag, FP32, 4>;
-
-  // These instructions are split after register allocation, so we don't
-  // want a custom inserter.
-  let Has20BitOffset = 1, HasIndex = 1, Is128Bit = 1 in {
-    def LX : Pseudo<(outs FP128:$dst), (ins bdxaddr20only128:$src),
-                     [(set FP128:$dst, (load bdxaddr20only128:$src))]>;
+  let isReMaterializable = 1 in {
+    let isCodeGenOnly = 1 in
+      // Reload f16 from 4-byte spill slot.
+      defm LE16 : UnaryRXPair<"le", 0x78, 0xED64, z_load, FP16, 4>;
+    defm LE : UnaryRXPair<"le", 0x78, 0xED64, z_load, FP32, 4>;
+    defm LD : UnaryRXPair<"ld", 0x68, 0xED65, z_load, FP64, 8>;
+
+    // For z13 we prefer LDE over LE to avoid partial register dependencies.
+    let isCodeGenOnly = 1 in
+      def LDE32 : UnaryRXE<"lde", 0xED24, null_frag, FP32, 4>;
+
+    // These instructions are split after register allocation, so we don't
+    // want a custom inserter.
+    let Has20BitOffset = 1, HasIndex = 1, Is128Bit = 1 in {
+      def LX : Pseudo<(outs FP128:$dst), (ins bdxaddr20only128:$src),
+                       [(set FP128:$dst, (load bdxaddr20only128:$src))]>;
+    }
   }
 }
 
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.td b/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
index 4f75e0132610e..7c231fd6e51b6 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
@@ -462,19 +462,21 @@ def LLGHI : InstAlias<"llghi\t$R1, $RI1", (LLILL GR64:$R1, imm64ll16:$RI1)>;
 
 // Register loads.
 let canFoldAsLoad = 1, SimpleBDXLoad = 1, mayLoad = 1 in {
-  // Expands to L, LY or LFH, depending on the choice of register.
-  def LMux : UnaryRXYPseudo<"l", z_load, GRX32, 4>,
-             Requires<[FeatureHighWord]>;
-  defm L : UnaryRXPair<"l", 0x58, 0xE358, z_load, GR32, 4>;
-  def LFH : UnaryRXY<"lfh", 0xE3CA, z_load, GRH32, 4>,
-            Requires<[FeatureHighWord]>;
-  def LG : UnaryRXY<"lg", 0xE304, z_load, GR64, 8>;
+  let isReMaterializable = 1 in {
+    // Expands to L, LY or LFH, depending on the choice of register.
+    def LMux : UnaryRXYPseudo<"l", z_load, GRX32, 4>,
+               Requires<[FeatureHighWord]>;
+    defm L : UnaryRXPair<"l", 0x58, 0xE358, z_load, GR32, 4>;
+    def LFH : UnaryRXY<"lfh", 0xE3CA, z_load, GRH32, 4>,
+              Requires<[FeatureHighWord]>;
+    def LG : UnaryRXY<"lg", 0xE304, z_load, GR64, 8>;
 
-  // These instructions are split after register allocation, so we don't
-  // want a custom inserter.
-  let Has20BitOffset = 1, HasIndex = 1, Is128Bit = 1 in {
-    def L128 : Pseudo<(outs GR128:$dst), (ins bdxaddr20only128:$src),
-                      [(set GR128:$dst, (load bdxaddr20only128:$src))]>;
+    // These instructions are split after register allocation, so we don't
+    // want a custom inserter.
+    let Has20BitOffset = 1, HasIndex = 1, Is128Bit = 1 in {
+      def L128 : Pseudo<(outs GR128:$dst), (ins bdxaddr20only128:$src),
+                        [(set GR128:$dst, (load bdxaddr20only128:$src))]>;
+    }
   }
 }
 let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in {
@@ -482,7 +484,7 @@ let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in {
   def LTG : UnaryRXY<"ltg", 0xE302, z_load, GR64, 8>;
 }
 
-let canFoldAsLoad = 1 in {
+let canFoldAsLoad = 1, isReMaterializable = 1 in {
   def LRL  : UnaryRILPC<"lrl",  0xC4D, aligned_z_load, GR32>;
   def LGRL : UnaryRILPC<"lgrl", 0xC48, aligned_z_load, GR64>;
 }
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrVector.td b/llvm/lib/Target/SystemZ/SystemZInstrVector.td
index b0257be2eab89..0a2ee5881cb00 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrVector.td
+++ b/llvm/lib/Target/SystemZ/SystemZInstrVector.td
@@ -146,9 +146,11 @@ let Predicates = [FeatureVector] in {
   // to use those instructions rather than force a 20-bit displacement
   // into a GPR temporary.
   let mayLoad = 1, SimpleBDXLoad = 1, canFoldAsLoad = 1 in {
-    def VL16 : UnaryAliasVRX<z_load, v16hb, bdxaddr12pair>;
-    def VL32 : UnaryAliasVRX<z_load, v32sb, bdxaddr12pair>;
-    def VL64 : UnaryAliasVRX<z_load, v64db, bdxaddr12pair>;
+    let isReMaterializable = 1 in {
+      def VL16 : UnaryAliasVRX<z_load, v16hb, bdxaddr12pair>;
+      def VL32 : UnaryAliasVRX<z_load, v32sb, bdxaddr12pair>;
+      def VL64 : UnaryAliasVRX<z_load, v64db, bdxaddr12pair>;
+    }
   }
 
   // Load logical element and zero.
diff --git a/llvm/test/CodeGen/SystemZ/builtin-setjmp-spills.ll b/llvm/test/CodeGen/SystemZ/builtin-setjmp-spills.ll
index 5626f45ac8bbb..8e369ae46b07d 100644
--- a/llvm/test/CodeGen/SystemZ/builtin-setjmp-spills.ll
+++ b/llvm/test/CodeGen/SystemZ/builtin-setjmp-spills.ll
@@ -47,16 +47,16 @@ define signext i32 @func() {
 ; CHECK-NEXT:    .cfi_offset %r13, -56
 ; CHECK-NEXT:    .cfi_offset %r14, -48
 ; CHECK-NEXT:    .cfi_offset %r15, -40
-; CHECK-NEXT:    aghi %r15, -384
-; CHECK-NEXT:    .cfi_def_cfa_offset 544
-; CHECK-NEXT:    std %f8, 376(%r15) # 8-byte Spill
-; CHECK-NEXT:    std %f9, 368(%r15) # 8-byte Spill
-; CHECK-NEXT:    std %f10, 360(%r15) # 8-byte Spill
-; CHECK-NEXT:    std %f11, 352(%r15) # 8-byte Spill
-; CHECK-NEXT:    std %f12, 344(%r15) # 8-byte Spill
-; CHECK-NEXT:    std %f13, 336(%r15) # 8-byte Spill
-; CHECK-NEXT:    std %f14, 328(%r15) # 8-byte Spill
-; CHECK-NEXT:    std %f15, 320(%r15) # 8-byte Spill
+; CHECK-NEXT:    aghi %r15, -64
+; CHECK-NEXT:    .cfi_def_cfa_offset 224
+; CHECK-NEXT:    std %f8, 56(%r15) # 8-byte Spill
+; CHECK-NEXT:    std %f9, 48(%r15) # 8-byte Spill
+; CHECK-NEXT:    std %f10, 40(%r15) # 8-byte Spill
+; CHECK-NEXT:    std %f11, 32(%r15) # 8-byte Spill
+; CHECK-NEXT:    std %f12, 24(%r15) # 8-byte Spill
+; CHECK-NEXT:    std %f13, 16(%r15) # 8-byte Spill
+; CHECK-NEXT:    std %f14, 8(%r15) # 8-byte Spill
+; CHECK-NEXT:    std %f15, 0(%r15) # 8-byte Spill
 ; CHECK-NEXT:    .cfi_offset %f8, -168
 ; CHECK-NEXT:    .cfi_offset %f9, -176
 ; CHECK-NEXT:    .cfi_offset %f10, -184
@@ -67,64 +67,44 @@ define signext i32 @func() {
 ; CHECK-NEXT:    .cfi_offset %f15, -224
 ; CHECK-NEXT:    lgrl %r1, t@GOT
 ; CHECK-NEXT:    lgrl %r2, s@GOT
-; CHECK-NEXT:    stg %r1, 312(%r15) # 8-byte Spill
 ; CHECK-NEXT:    mvhi 0(%r1), 1
 ; CHECK-NEXT:    lgrl %r1, r@GOT
 ; CHECK-NEXT:    lgrl %r3, q@GOT
-; CHECK-NEXT:    stg %r2, 304(%r15) # 8-byte Spill
 ; CHECK-NEXT:    mvhi 0(%r2), 1
 ; CHECK-NEXT:    lgrl %r2, p@GOT
-; CHECK-NEXT:    stg %r1, 296(%r15) # 8-byte Spill
 ; CHECK-NEXT:    mvhi 0(%r1), 1
-; CHECK-NEXT:    stg %r3, 288(%r15) # 8-byte Spill
 ; CHECK-NEXT:    mvhi 0(%r3), 1
 ; CHECK-NEXT:    lgrl %r1, o@GOT
-; CHECK-NEXT:    stg %r2, 280(%r15) # 8-byte Spill
 ; CHECK-NEXT:    mvhi 0(%r2), 1
 ; CHECK-NEXT:    lgrl %r2, n@GOT
 ; CHECK-NEXT:    lgrl %r3, m@GOT
-; CHECK-NEXT:    stg %r1, 272(%r15) # 8-byte Spill
 ; CHECK-NEXT:    mvhi 0(%r1), 1
 ; CHECK-NEXT:    lgrl %r1, l@GOT
-; CHECK-NEXT:    stg %r2, 264(%r15) # 8-byte Spill
 ; CHECK-NEXT:    mvhi 0(%r2), 1
-; CHECK-NEXT:    stg %r3, 256(%r15) # 8-byte Spill
 ; CHECK-NEXT:    mvhi 0(%r3), 1
 ; CHECK-NEXT:    lgrl %r2, k@GOT
-; CHECK-NEXT:    stg %r1, 248(%r15) # 8-byte Spill
 ; CHECK-NEXT:    mvhi 0(%r1), 1
 ; CHECK-NEXT:    lgrl %r1, j@GOT
 ; CHECK-NEXT:    lgrl %r3, i@GOT
-; CHECK-NEXT:    stg %r2, 240(%r15) # 8-byte Spill
 ; CHECK-NEXT:    mvhi 0(%r2), 1
 ; CHECK-NEXT:    lgrl %r2, h@GOT
-; CHECK-NEXT:    stg %r1, 232(%r15) # 8-byte Spill
 ; CHECK-NEXT:    mvhi 0(%r1), 1
-; CHECK-NEXT:    stg %r3, 224(%r15) # 8-byte Spill
 ; CHECK-NEXT:    mvhi 0(%r3), 1
 ; CHECK-NEXT:    lgrl %r1, g@GOT
-; CHECK-NEXT:    stg %r2, 216(%r15) # 8-byte Spill
 ; CHECK-NEXT:    mvhi 0(%r2), 1
 ; CHECK-NEXT:    lgrl %r2, f@GOT
 ; CHECK-NEXT:    lgrl %r3, e@GOT
-; CHECK-NEXT:    stg %r1, 208(%r15) # 8-byte Spill
 ; CHECK-NEXT:    mvhi 0(%r1), 1
 ; CHECK-NEXT:    lgrl %r1, d@GOT
-; CHECK-NEXT:    stg %r2, 200(%r15) # 8-byte Spill
 ; CHECK-NEXT:    mvhi 0(%r2), 1
-; CHECK-NEXT:    stg %r3, 192(%r15) # 8-byte Spill
 ; CHECK-NEXT:    mvhi 0(%r3), 1
 ; CHECK-NEXT:    lgrl %r2, c@GOT
-; CHECK-NEXT:    stg %r1, 184(%r15) # 8-byte Spill
 ; CHECK-NEXT:    mvhi 0(%r1), 1
 ; CHECK-NEXT:    lgrl %r3, b@GOT
 ; CHECK-NEXT:    lgrl %r4, a@GOT
-; CHECK-NEXT:    stg %r2, 176(%r15) # 8-byte Spill
 ; CHECK-NEXT:    mvhi 0(%r2), 1
 ; CHECK-NEXT:    lgrl %r1, buf@GOT
-; CHECK-NEXT:    stg %r3, 168(%r15) # 8-byte Spill
 ; CHECK-NEXT:    mvhi 0(%r3), 1
-; CHECK-NEXT:    stg %r4, 160(%r15) # 8-byte Spill
 ; CHECK-NEXT:    mvhi 0(%r4), 1
 ; CHECK-NEXT:    larl %r0, .LBB0_2
 ; CHECK-NEXT:    stg %r0, 8(%r1)
@@ -136,56 +116,56 @@ define signext i32 @func() {
 ; CHECK-NEXT:    # %entry
 ; CHECK-NEXT:    lhi %r0, 1
 ; CHECK-NEXT:  .LBB0_3: # %entry
-; CHECK-NEXT:    lg %r1, 160(%r15) # 8-byte Reload
+; CHECK-NEXT:    lgrl %r1, a@GOT
 ; CHECK-NEXT:    a %r0, 0(%r1)
-; CHECK-NEXT:    lg %r1, 168(%r15) # 8-byte Reload
+; CHECK-NEXT:    lgrl %r1, b@GOT
 ; CHECK-NEXT:    a %r0, 0(%r1)
-; CHECK-NEXT:    lg %r1, 176(%r15) # 8-byte Reload
+; CHECK-NEXT:    lgrl %r1, c@GOT
 ; CHECK-NEXT:    a %r0, 0(%r1)
-; CHECK-NEXT:    lg %r1, 184(%r15) # 8-byte Reload
+; CHECK-NEXT:    lgrl %r1, d@GOT
 ; CHECK-NEXT:    a %r0, 0(%r1)
-; CHECK-NEXT:    lg %r1, 192(%r15) # 8-byte Reload
+; CHECK-NEXT:    lgrl %r1, e@GOT
 ; CHECK-NEXT:    a %r0, 0(%r1)
-; CHECK-NEXT:    lg %r1, 200(%r15) # 8-byte Reload
+; CHECK-NEXT:    lgrl %r1, f@GOT
 ; CHECK-NEXT:    a %r0, 0(%r1)
-; CHECK-NEXT:    lg %r1, 208(%r15) # 8-byte Reload
+; CHECK-NEXT:    lgrl %r1, g@GOT
 ; CHECK-NEXT:    a %r0, 0(%r1)
-; CHECK-NEXT:    lg %r1, 216(%r15) # 8-byte Reload
+; CHECK-NEXT:    lgrl %r1, h@GOT
 ; CHECK-NEXT:    a %r0, 0(%r1)
-; CHECK-NEXT:    lg %r1, 224(%r15) # 8-byte Reload
+; CHECK-NEXT:    lgrl %r1, i@GOT
 ; CHECK-NEXT:    a %r0, 0(%r1)
-; CHECK-NEXT:    lg %r1, 232(%r15) # 8-byte Reload
+; CHECK-NEXT:    lgrl %r1, j@GOT
 ; CHECK-NEXT:    a %r0, 0(%r1)
-; CHECK-NEXT:    lg %r1, 240(%r15) # 8-byte Reload
+; CHECK-NEXT:    lgrl %r1, k@GOT
 ; CHECK-NEXT:    a %r0, 0(%r1)
-; CHECK-NEXT:    lg %r1, 248(%r15) # 8-byte Reload
+; CHECK-NEXT:    lgrl %r1, l@GOT
 ; CHECK-NEXT:    a %r0, 0(%r1)
-; CHECK-NEXT:    lg %r1, 256(%r15) # 8-byte Reload
+; CHECK-NEXT:    lgrl %r1, m@GOT
 ; CHECK-NEXT:    a %r0, 0(%r1)
-; CHECK-NEXT:    lg %r1, 264(%r15) # 8-byte Reload
+; CHECK-NEXT:    lgrl %r1, n@GOT
 ; CHECK-NEXT:    a %r0, 0(%r1)
-; CHECK-NEXT:    lg %r1, 272(%r15) # 8-byte Reload
+; CHECK-NEXT:    lgrl %r1, o@GOT
 ; CHECK-NEXT:    a %r0, 0(%r1)
-; CHECK-NEXT:    lg %r1, 280(%r15) # 8-byte Reload
+; CHECK-NEXT:    lgrl %r1, p@GOT
 ; CHECK-NEXT:    a %r0, 0(%r1)
-; CHECK-NEXT:    lg %r1, 288(%r15) # 8-byte Reload
+; CHECK-NEXT:    lgrl %r1, q@GOT
 ; CHECK-NEXT:    a %r0, 0(%r1)
-; CHECK-NEXT:    lg %r1, 296(%r15) # 8-byte Reload
+; CHECK-NEXT:    lgrl %r1, r@GOT
 ; CHECK-NEXT:    a %r0, 0(%r1)
-; CHECK-NEXT:    lg %r1, 304(%r15) # 8-byte Reload
+; CHECK-NEXT:    lgrl %r1, s@GOT
 ; CHECK-NEXT:    a %r0, 0(%r1)
-; CHECK-NEXT:    lg %r1, 312(%r15) # 8-byte Reload
+; CHECK-NEXT:    lgrl %r1, t@GOT
 ; CHECK-NEXT:    a %r0, 0(%r1)
 ; CHECK-NEXT:    lgfr %r2, %r0
-; CHECK-NEXT:    ld %f8, 376(%r15) # 8-byte Reload
-; CHECK-NEXT:    ld %f9, 368(%r15) # 8-byte Reload
-; CHECK-NEXT:    ld %f10, 360(%r15) # 8-byte Reload
-; CHECK-NEXT:    ld %f11, 352(%r15) # 8-byte Reload
-; CHECK-NEXT:    ld %f12, 344(%r15) # 8-byte Reload
-; CHECK-NEXT:    ld %f13, 336(%r15) # 8-byte Reload
-; CHECK-NEXT:    ld %f14, 328(%r15) # 8-byte Reload
-; CHECK-NEXT:    ld %f15, 320(%r15) # 8-byte Reload
-; CHECK-NEXT:    lmg %r6, %r15, 432(%r15)
+; CHECK-NEXT:    ld %f8, 56(%r15) # 8-byte Reload
+; CHECK-NEXT:    ld %f9, 48(%r15) # 8-byte Reload
+; CHECK-NEXT:    ld %f10, 40(%r15) # 8-byte Reload
+; CHECK-NEXT:    ld %f11, 32(%r15) # 8-byte Reload
+; CHECK-NEXT:    ld %f12, 24(%r15) # 8-byte Reload
+; CHECK-NEXT:    ld %f13, 16(%r15) # 8-byte Reload
+; CHECK-NEXT:    ld %f14, 8(%r15) # 8-byte Reload
+; CHECK-NEXT:    ld %f15, 0(%r15) # 8-byte Reload
+; CHECK-NEXT:    lmg %r6, %r15, 112(%r15)
 ; CHECK-NEXT:    br %r14
 entry:
   store i32 1, ptr @t, align 4
diff --git a/llvm/test/CodeGen/SystemZ/fp-move-02.ll b/llvm/test/CodeGen/SystemZ/fp-move-02.ll
index 7f7ac7cda83d3..74043146f2026 100644
--- a/llvm/test/CodeGen/SystemZ/fp-move-02.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-move-02.ll
@@ -147,8 +147,8 @@ define void @f10(double %extra) {
 ; CHECK-NEXT:    .cfi_offset %r13, -56
 ; CHECK-NEXT:    .cfi_offset %r14, -48
 ; CHECK-NEXT:    .cfi_offset %r15, -40
-; CHECK-NEXT:    aghi %r15, -184
-; CHECK-NEXT:    .cfi_def_cfa_offset 344
+; CHECK-NEXT:    aghi %r15, -176
+; CHECK-NEXT:    .cfi_def_cfa_offset 336
 ; CHECK-NEXT:    lgrl %r1, dptr@GOT
 ; CHECK-NEXT:    ldr %f1, %f0
 ; CHECK-NEXT:    adb %f1, 0(%r1)
@@ -156,9 +156,9 @@ define void @f10(double %extra) {
 ; CHECK-NEXT:    adb %f2, 0(%r1)
 ; CHECK-NEXT:    ldr %f3, %f0
 ; CHECK-NEXT:    adb %f3, 0(%r1)
-; CHECK-NEXT:    std %f1, 176(%r15) # 8-byte Spill
-; CHECK-NEXT:    std %f2, 168(%r15) # 8-byte Spill
-; CHECK-NEXT:    std %f3, 160(%r15) # 8-byte Spill
+; CHECK-NEXT:    std %f1, 168(%r15) # 8-byte Spill
+; CHECK-NEXT:    std %f2, 160(%r15) # 8-byte Spill
+; CHECK-NEXT:    lgdr %r11, %f3
 ; CHECK-NEXT:    ldr %f1, %f0
 ; CHECK-NEXT:    adb %f1, 0(%r1)
 ; CHECK-NEXT:    ldr %f2, %f0
@@ -176,28 +176,28 @@ define void @f10(double %extra) {
 ; CHECK-NEXT:    ldr %f2, %f0
 ; CHECK-NEXT:    adb %f2, 0(%r1)
 ; CHECK-NEXT:    adb %f0, 0(%r1)
-; CHECK-NEXT:    lgrl %r6, iptr@GOT
-; CHECK-NEXT:    lgdr %r13, %f1
-; CHECK-NEXT:    lgdr %r12, %f2
-; CHECK-NEXT:    lgdr %r11, %f0
+; CHECK-NEXT:    lgdr %r6, %f1
+; CHECK-NEXT:    lgdr %r13, %f2
+; CHECK-NEXT:    lgdr %r12, %f0
 ; CHECK-NEXT:  .LBB9_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    brasl %r14, foo@PLT
 ; CHECK-NEXT:    lgr %r0, %r2
-; CHECK-NEXT:    og %r0, 176(%r15) # 8-byte Folded Reload
 ; CHECK-NEXT:    og %r0, 168(%r15) # 8-byte Folded Reload
 ; CHECK-NEXT:    og %r0, 160(%r15) # 8-byte Folded Reload
+; CHECK-NEXT:    ogr %r0, %r11
 ; CHECK-NEXT:    ogr %r0, %r10
 ; CHECK-NEXT:    ogr %r0, %r9
 ; CHECK-NEXT:    ogr %r0, %r8
 ; CHECK-NEXT:    ogr %r0, %r7
+; CHECK-NEXT:    ogr %r0, %r6
 ; CHECK-NEXT:    ogr %r0, %r13
 ; CHECK-NEXT:    ogr %r0, %r12
-; CHECK-NEXT:    ogr %r0, %r11
-; CHECK-NEXT:    stg %r0, 0(%r6)
+; CHECK-NEXT:    lgrl %r1, iptr@GOT
+; CHECK-NEXT:    stg %r0, 0(%r1)
 ; CHECK-NEXT:    cgijlh %r2, 1, .LBB9_1
 ; CHECK-NEXT:  # %bb.2: # %exit
-; CHECK-NEXT:    lmg %r6, %r15, 232(%r15)
+; CHECK-NEXT:    lmg %r6, %r15, 224(%r15)
 ; CHECK-NEXT:    br %r14
 entry:
   %double0 = load volatile double, ptr @dptr
diff --git a/llvm/test/CodeGen/SystemZ/remat.ll b/llvm/test/CodeGen/SystemZ/remat.ll
new file mode 100644
index 0000000000000..fee48774ef10a
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/remat.ll
@@ -0,0 +1,127 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; Test that marking loads as rematerializable avoids unnecessary spills by
+; reloading directly from where the argument was originally passed on the
+; stack, moving the load down to the remat site.
+
+; RUN: llc < %s -verify-machineinstrs -mtriple=s390x-linux-gnu -O1 | FileCheck %s
+
+define void @remat_load(
+; CHECK-LABEL: remat_load:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    stmg %r6, %r15, 48(%r15)
+; CHECK-NEXT:    aghi %r15, -232
+; CHECK-NEXT:    std %f8, 224(%r15) # 8-byte Spill
+; CHECK-NEXT:    std %f9, 216(%r15) # 8-byte Spill
+; CHECK-NEXT:    std %f10, 208(%r15) # 8-byte Spill
+; CHECK-NEXT:    std %f11, 200(%r15) # 8-byte Spill
+; CHECK-NEXT:    std %f12, 192(%r15) # 8-byte Spill
+; CHECK-NEXT:    std %f13, 184(%r15) # 8-byte Spill
+; CHECK-NEXT:    std %f14, 176(%r15) # 8-byte Spill
+; CHECK-NEXT:    std %f15, 168(%r15) # 8-byte Spill
+; CHECK-NEXT:    ld %f0, 448(%r15)
+; CHECK-NEXT:    le %f1, 444(%r15)
+; CHECK-NEXT:    le %f2, 436(%r15)
+; CHECK-NEXT:    le %f3, 428(%r15)
+; CHECK-NEXT:    lg %r14, 456(%r15)
+; CHECK-NEXT:    lb %r3, 399(%r15)
+; CHECK-NEXT:    st %r3, 160(%r15) # 4-byte Spill
+; CHECK-NEXT:    lh %r2, 406(%r15)
+; CHECK-NEXT:    st %r2, 164(%r15) # 4-byte Spill
+; CHECK-NEXT:    l %r1, 412(%r15)
+; CHECK-NEXT:    lg %r0, 416(%r15)
+; CHECK-NEXT:    stc %r3, 0(%r14)
+; CHECK-NEXT:    sth %r2, 0(%r14)
+; CHECK-NEXT:    st %r1, 0(%r14)
+; CHECK-NEXT:    stg %r0, 0(%r14)
+; CHECK-NEXT:    ste %f3, 0(%r14)
+; CHECK-NEXT:    ste %f2, 0(%r14)
+; CHECK-NEXT:    ste %f1, 0(%r14)
+; CHECK-NEXT:    std %f0, 0(%r14)
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    l %r0, 160(%r15) # 4-byte Reload
+; CHECK-NEXT:    stc %r0, 0(%r14)
+; CHECK-NEXT:    l %r0, 164(%r15) # 4-byte Reload
+; CHECK-NEXT:    sth %r0, 0(%r14)
+; CHECK-NEXT:    l %r0, 412(%r15)
+; CHECK-NEXT:    st %r0, 0(%r14)
+; CHECK-NEXT:    lg %r0, 416(%r15)
+; CHECK-NEXT:    stg %r0, 0(%r14)
+; CHECK-NEXT:    le %f0, 428(%r15)
+; CHECK-NEXT:    ste %f0, 0(%r14)
+; CHECK-NEXT:    le %f0, 436(%r15)
+; CHECK-NEXT:    ste %f0, 0(%r14)
+; CHECK-NEXT:    le %f0, 444(%r15)
+; CHECK-NEXT:    ste %f0, 0(%r14)
+; CHECK-NEXT:    ld %f0, 448(%r15)
+; CHECK-NEXT:    std %f0, 0(%r14)
+; CHECK-NEXT:    ld %f8, 224(%r15) # 8-byte Reload
+; CHECK-NEXT:    ld %f9, 216(%r15) # 8-byte Reload
+; CHECK-NEXT:    ld %f10, 208(%r15) # 8-byte Reload
+; CHECK-NEXT:    ld %f11, 200(%r15) # 8-byte Reload
+; CHECK-NEXT:    ld %f12, 192(%r15) # 8-byte Reload
+; CHECK-NEXT:    ld %f13, 184(%r15) # 8-byte Reload
+; CHECK-NEXT:    ld %f14, 176(%r15) # 8-byte Reload
+; CHECK-NEXT:    ld %f15, 168(%r15) # 8-byte Reload
+; CHECK-NEXT:    lmg %r6, %r15, 280(%r15)
+; CHECK-NEXT:    br %r14
+    i64 %i0, i64 %i1, i64 %i2, i64 %i3, i64 %i4, ; r2-r6
+    double %f0, double %f1, double %f2, double %f3, ; f0, f2, f4, f6
+    i8 %stackarg0,
+    i16 %stackarg1,
+    i32 %stackarg2,
+    i64 %stackarg3,
+    float %stackarg4,
+    float %stackarg5,
+    float %stackarg6,
+    double %stackarg7,
+    ptr %p
+) nounwind {
+entry:
+  ; Add a use of the stack arguments here so that we will have to load them from
+  ; the stack before the inline asm. Otherwise we would be exercising the
+  ; machine scheduler, not rematerialization.
+  store volatile i8 %stackarg0, ptr %p
+  store volatile i16 %stackarg1, ptr %p
+  store volatile i32 %stackarg2, ptr %p
+  store volatile i64 %stackarg3, ptr %p
+  store volatile float %stackarg4, ptr %p
+  store volatile float %stackarg5, ptr %p
+  store volatile float %stackarg6, ptr %p
+  store volatile double %stackarg7, ptr %p
+  ; Clobber registers to force re-loads
+  tail call void asm sideeffect "", "~{r0},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{f0},~{f1},~{f2},~{f3},~{f4},~{f5},~{f6},~{f7},~{f8},~{f9},~{f10},~{f11},~{f12},~{f13},~{f14},~{f15}"()
+  ; Now use them after spilling everything to force rematerialization
+  store volatile i8 %stackarg0, ptr %p
+  store volatile i16 %stackarg1, ptr %p
+  store volatile i32 %stackarg2, ptr %p
+  store volatile i64 %stackarg3, ptr %p
+  store volatile float %stackarg4, ptr %p
+  store volatile float %stackarg5, ptr %p
+  store volatile float %stackarg6, ptr %p
+  store volatile double %stackarg7, ptr %p
+  ret void
+}
+
+@const = external constant i32
+; Remat the load of the constant global.
+define i32 @constglobal_load() nounwind {
+; CHECK-LABEL: constglobal_load:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    stmg %r6, %r15, 48(%r15)
+; CHECK-NEXT:    aghi %r15, -168
+; CHECK-NEXT:    lgrl %r1, const@GOT
+; CHECK-NEXT:    lhi %r0, 1
+; CHECK-NEXT:    a %r0, 0(%r1)
+; CHECK-NEXT:    st %r0, 164(%r15) # 4-byte Spill
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    l %r2, 164(%r15) # 4-byte Reload
+; CHECK-NEXT:    lmg %r6, %r15, 216(%r15)
+; CHECK-NEXT:    br %r14
+entry:
+  %global = load i32, ptr @const
+  tail call void asm sideeffect "", "~{r0},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+  %a = add i32 %global, 1
+  ret i32 %a
+}


