[llvm] [SystemZ, DebugInfo] Support InstrRef-based Debug Value Tracking in the SystemZ backend (PR #132415)
Dominik Steenken via llvm-commits
llvm-commits at lists.llvm.org
Fri Mar 21 09:13:06 PDT 2025
https://github.com/dominik-steenken updated https://github.com/llvm/llvm-project/pull/132415
>From e032f69765678980ca157d642572c1ab8da4e02b Mon Sep 17 00:00:00 2001
From: Dominik Steenken <dost at de.ibm.com>
Date: Fri, 17 Jan 2025 11:02:22 +0100
Subject: [PATCH 1/7] [SystemZ] SystemZDebugInstrInfo 1/6 Implement basic
isCopyInstrImpl As a first step toward implementing SystemZ support for
instr-ref-based debug info tracking, this commit adds a basic implementation
for the previously absent `SystemZInstrInfo::isCopyInstrImpl`. This is
accomplished by adding a new flag called `isMoveReg` on the relevant
instructions and calling upon that bit of information to implement the
function. Which instructions to add the flag to was based on the
implementation of `SystemZInstrInfo::copyPhysReg`.
---
llvm/lib/Target/SystemZ/SystemZInstrFP.td | 14 +++++-----
llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp | 26 +++++++++++++++++++
llvm/lib/Target/SystemZ/SystemZInstrInfo.h | 2 ++
llvm/lib/Target/SystemZ/SystemZInstrInfo.td | 6 +++--
llvm/lib/Target/SystemZ/SystemZInstrVector.td | 8 +++---
5 files changed, 45 insertions(+), 11 deletions(-)
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrFP.td b/llvm/lib/Target/SystemZ/SystemZInstrFP.td
index c171982b45692..bef38b9cb809b 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrFP.td
+++ b/llvm/lib/Target/SystemZ/SystemZInstrFP.td
@@ -42,13 +42,15 @@ let isAsCheapAsAMove = 1, isMoveImm = 1 in {
}
// Moves between two floating-point registers.
-def LER : UnaryRR <"ler", 0x38, null_frag, FP32, FP32>;
-def LDR : UnaryRR <"ldr", 0x28, null_frag, FP64, FP64>;
-def LXR : UnaryRRE<"lxr", 0xB365, null_frag, FP128, FP128>;
+let isMoveReg = 1 in {
+ def LER : UnaryRR <"ler", 0x38, null_frag, FP32, FP32>;
+ def LDR : UnaryRR <"ldr", 0x28, null_frag, FP64, FP64>;
+ def LXR : UnaryRRE<"lxr", 0xB365, null_frag, FP128, FP128>;
+ // For z13 we prefer LDR over LER to avoid partial register dependencies.
+ let isCodeGenOnly = 1 in
+ def LDR32 : UnaryRR<"ldr", 0x28, null_frag, FP32, FP32>;
+}
-// For z13 we prefer LDR over LER to avoid partial register dependencies.
-let isCodeGenOnly = 1 in
- def LDR32 : UnaryRR<"ldr", 0x28, null_frag, FP32, FP32>;
// Moves between two floating-point registers that also set the condition
// codes. Note that these instructions will turn SNaNs into QNaNs and should
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
index ab2e5b3c9a190..a54552a759379 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -39,6 +39,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
+//#include "SystemZRegisterInfo.h"
#include <cassert>
#include <cstdint>
#include <iterator>
@@ -2316,3 +2317,28 @@ bool SystemZInstrInfo::getConstValDefinedInReg(const MachineInstr &MI,
return false;
}
+
+std::optional<DestSourcePair> SystemZInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
+ // if MI is a simple single-register copy operation, return operand pair
+ if (MI.isMoveReg())
+ return DestSourcePair(MI.getOperand(0), MI.getOperand(1));
+ // more complicated cases might be handled here.
+ /*
+ switch (MI.getOpcode()) {
+ // VMHRG would work but then how can i turn a register into a MachineOperand?
+ case (SystemZ::VMRHG): {
+ MCRegister OpReg1 = MI.getOperand(1).getReg();
+ MCRegister OpReg2 = MI.getOperand(2).getReg();
+ // Check if OpReg1 and OpReg2 together can form an FP128 register.
+ MCRegister FirstReg = (OpReg1.id() < OpReg2.id()) ? OpReg1 : OpReg2;
+ MCRegister SecondReg = (OpReg1.id() < OpReg2.id()) ? OpReg2 : OpReg1;
+ MCRegister FP1 = RI.getMatchingSuperReg(FirstReg, SystemZ::subreg_h64, &SystemZ::FP128BitRegClass);
+ MCRegister FP2 = RI.getMatchingSuperReg(SecondReg, SystemZ::subreg_h64, &SystemZ::FP128BitRegClass);
+ if (FP1 != FP2)
+ return std::nullopt;
+ return DestSourcePair(MI.getOperand(0), MachineOperand(FP1));
+ }
+ }
+ */
+ return std::nullopt;
+}
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.h b/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
index 5f09ad508905d..bfbfcc24d9f70 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
@@ -386,6 +386,8 @@ class SystemZInstrInfo : public SystemZGenInstrInfo {
bool getConstValDefinedInReg(const MachineInstr &MI, const Register Reg,
int64_t &ImmVal) const override;
+
+ std::optional<DestSourcePair> isCopyInstrImpl(const MachineInstr &MI) const override;
};
} // end namespace llvm
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.td b/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
index 2acb4b0339d32..4f75e0132610e 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
@@ -424,8 +424,10 @@ defm CondStore64 : CondStores<GR64, simple_store,
//===----------------------------------------------------------------------===//
// Register moves.
-def LR : UnaryRR <"lr", 0x18, null_frag, GR32, GR32>;
-def LGR : UnaryRRE<"lgr", 0xB904, null_frag, GR64, GR64>;
+let isMoveReg = 1 in {
+ def LR : UnaryRR <"lr", 0x18, null_frag, GR32, GR32>;
+ def LGR : UnaryRRE<"lgr", 0xB904, null_frag, GR64, GR64>;
+}
let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in {
def LTR : UnaryRR <"ltr", 0x12, null_frag, GR32, GR32>;
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrVector.td b/llvm/lib/Target/SystemZ/SystemZInstrVector.td
index 3187d91b00046..7043850d9eca5 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrVector.td
+++ b/llvm/lib/Target/SystemZ/SystemZInstrVector.td
@@ -12,9 +12,11 @@
let Predicates = [FeatureVector] in {
// Register move.
- def VLR : UnaryVRRa<"vlr", 0xE756, null_frag, v128any, v128any>;
- def VLR32 : UnaryAliasVRR<null_frag, v32sb, v32sb>;
- def VLR64 : UnaryAliasVRR<null_frag, v64db, v64db>;
+ let isMoveReg = 1 in {
+ def VLR : UnaryVRRa<"vlr", 0xE756, null_frag, v128any, v128any>;
+ def VLR32 : UnaryAliasVRR<null_frag, v32sb, v32sb>;
+ def VLR64 : UnaryAliasVRR<null_frag, v64db, v64db>;
+ }
// Load GR from VR element.
def VLGV : BinaryVRScGeneric<"vlgv", 0xE721>;
>From 18bcb0943ba24cd0138400c014cae8adc966f15f Mon Sep 17 00:00:00 2001
From: Dominik Steenken <dominik.steenken at gmail.com>
Date: Tue, 21 Jan 2025 10:38:02 +0100
Subject: [PATCH 2/7] [SystemZ] SystemZDebugInstrInfo 2/6 Add
is[LoadFrom|StoreTo]StackSlotPostFE This commit implements
`SystemZInstrInfo::isLoadFromStackSlotPostFE`, as well as
`SystemZInstrInfo::isStoreToStackSlotPostFE`. The implementation relies upon
the presence of MachineMemOperands on the relevant `MachineInstr`s in
order to access the `FrameIndex` post frame index elimination. Implementing
these functions is necessary to support instr-ref based debug info tracking.
However, it also changes the behavior of the AsmPrinter, since it will now be
able to properly detect non-folded spills and reloads, so this changes a
number of tests that were checking specifically for folded spills and reloads. Note that
there are some tests that still check for vst and vl as folded spills/reloads
even though they should be straight reloads. This will be addressed in a
future commit.
---
llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp | 44 +
llvm/lib/Target/SystemZ/SystemZInstrInfo.h | 4 +
.../CodeGen/SystemZ/builtin-setjmp-alloca.ll | 64 +-
.../CodeGen/SystemZ/builtin-setjmp-spills.ll | 112 +--
llvm/test/CodeGen/SystemZ/builtin-setjmp.ll | 64 +-
.../CodeGen/SystemZ/fmuladd-soft-float.ll | 14 +-
.../test/CodeGen/SystemZ/foldmemop-imm-02.mir | 4 +-
.../test/CodeGen/SystemZ/foldmemop-vec-cc.mir | 2 +-
.../CodeGen/SystemZ/foldmemop-vec-cmp.mir | 8 +-
.../CodeGen/SystemZ/foldmemop-vec-unary.mir | 8 +-
llvm/test/CodeGen/SystemZ/fp-move-02.ll | 102 +--
llvm/test/CodeGen/SystemZ/frame-22.ll | 20 +-
llvm/test/CodeGen/SystemZ/int-uadd-03.ll | 4 +-
llvm/test/CodeGen/SystemZ/int-usub-03.ll | 4 +-
.../vector-constrained-fp-intrinsics.ll | 852 +++++++++---------
.../CodeGen/SystemZ/zos-prologue-epilog.ll | 32 +-
16 files changed, 693 insertions(+), 645 deletions(-)
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
index a54552a759379..c47a101ad5a60 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -347,6 +347,50 @@ Register SystemZInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore);
}
+Register SystemZInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
+ int &FrameIndex) const {
+ // if this is not a simple load from memory, it's not a load from stack slot either.
+ const MCInstrDesc &MCID = MI.getDesc();
+ if (!(MCID.TSFlags & SystemZII::SimpleBDXLoad))
+ return 0;
+
+ Register Reg;
+ // If the frame index is still there, use it.
+ if ((Reg = isLoadFromStackSlot(MI, FrameIndex)))
+ return Reg;
+ SmallVector<const MachineMemOperand *, 1> Accesses;
+ // Otherwise, attempt to derive frame index from MachineMemOperands
+ if (hasLoadFromStackSlot(MI, Accesses)) {
+ FrameIndex =
+ cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
+ ->getFrameIndex();
+ return MI.getOperand(0).getReg();
+ }
+ return 0;
+}
+
+Register SystemZInstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
+ int &FrameIndex) const {
+ // if this is not a simple store to memory, it's not a store to stack slot either.
+ const MCInstrDesc &MCID = MI.getDesc();
+ if (!(MCID.TSFlags & SystemZII::SimpleBDXStore))
+ return 0;
+
+ Register Reg;
+ // If the frame index is still there, use it.
+ if ((Reg = isStoreToStackSlot(MI, FrameIndex)))
+ return Reg;
+ // Otherwise, attempt to derive frame index from MachineMemOperands
+ SmallVector<const MachineMemOperand *, 1> Accesses;
+ if (hasStoreToStackSlot(MI, Accesses)) {
+ FrameIndex =
+ cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
+ ->getFrameIndex();
+ return MI.getOperand(0).getReg();
+ }
+ return 0;
+}
+
bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr &MI,
int &DestFrameIndex,
int &SrcFrameIndex) const {
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.h b/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
index bfbfcc24d9f70..5d972a504be54 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
@@ -232,6 +232,10 @@ class SystemZInstrInfo : public SystemZGenInstrInfo {
int &FrameIndex) const override;
Register isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
+ Register isLoadFromStackSlotPostFE(const MachineInstr &MI,
+ int &FrameIndex) const override;
+ Register isStoreToStackSlotPostFE(const MachineInstr &MI,
+ int &FrameIndex) const override;
bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex,
int &SrcFrameIndex) const override;
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
diff --git a/llvm/test/CodeGen/SystemZ/builtin-setjmp-alloca.ll b/llvm/test/CodeGen/SystemZ/builtin-setjmp-alloca.ll
index f714599f9a8f2..9bd6ff8eea4c5 100644
--- a/llvm/test/CodeGen/SystemZ/builtin-setjmp-alloca.ll
+++ b/llvm/test/CodeGen/SystemZ/builtin-setjmp-alloca.ll
@@ -30,14 +30,14 @@ define signext i32 @foo() "frame-pointer"="all" {
; CHECK-NEXT: .cfi_def_cfa_offset 400
; CHECK-NEXT: lgr %r11, %r15
; CHECK-NEXT: .cfi_def_cfa_register %r11
-; CHECK-NEXT: std %f8, 232(%r11) # 8-byte Folded Spill
-; CHECK-NEXT: std %f9, 224(%r11) # 8-byte Folded Spill
-; CHECK-NEXT: std %f10, 216(%r11) # 8-byte Folded Spill
-; CHECK-NEXT: std %f11, 208(%r11) # 8-byte Folded Spill
-; CHECK-NEXT: std %f12, 200(%r11) # 8-byte Folded Spill
-; CHECK-NEXT: std %f13, 192(%r11) # 8-byte Folded Spill
-; CHECK-NEXT: std %f14, 184(%r11) # 8-byte Folded Spill
-; CHECK-NEXT: std %f15, 176(%r11) # 8-byte Folded Spill
+; CHECK-NEXT: std %f8, 232(%r11) # 8-byte Spill
+; CHECK-NEXT: std %f9, 224(%r11) # 8-byte Spill
+; CHECK-NEXT: std %f10, 216(%r11) # 8-byte Spill
+; CHECK-NEXT: std %f11, 208(%r11) # 8-byte Spill
+; CHECK-NEXT: std %f12, 200(%r11) # 8-byte Spill
+; CHECK-NEXT: std %f13, 192(%r11) # 8-byte Spill
+; CHECK-NEXT: std %f14, 184(%r11) # 8-byte Spill
+; CHECK-NEXT: std %f15, 176(%r11) # 8-byte Spill
; CHECK-NEXT: .cfi_offset %f8, -168
; CHECK-NEXT: .cfi_offset %f9, -176
; CHECK-NEXT: .cfi_offset %f10, -184
@@ -59,14 +59,14 @@ define signext i32 @foo() "frame-pointer"="all" {
; CHECK-NEXT: .LBB0_2: # %entry
; CHECK-NEXT: lg %r1, 168(%r11)
; CHECK-NEXT: lgf %r2, 0(%r1)
-; CHECK-NEXT: ld %f8, 232(%r11) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f9, 224(%r11) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f10, 216(%r11) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f11, 208(%r11) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f12, 200(%r11) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f13, 192(%r11) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f14, 184(%r11) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f15, 176(%r11) # 8-byte Folded Reload
+; CHECK-NEXT: ld %f8, 232(%r11) # 8-byte Reload
+; CHECK-NEXT: ld %f9, 224(%r11) # 8-byte Reload
+; CHECK-NEXT: ld %f10, 216(%r11) # 8-byte Reload
+; CHECK-NEXT: ld %f11, 208(%r11) # 8-byte Reload
+; CHECK-NEXT: ld %f12, 200(%r11) # 8-byte Reload
+; CHECK-NEXT: ld %f13, 192(%r11) # 8-byte Reload
+; CHECK-NEXT: ld %f14, 184(%r11) # 8-byte Reload
+; CHECK-NEXT: ld %f15, 176(%r11) # 8-byte Reload
; CHECK-NEXT: lmg %r6, %r15, 288(%r11)
; CHECK-NEXT: br %r14
entry:
@@ -101,14 +101,14 @@ define signext i32 @foo1() "backchain" "frame-pointer"="all" {
; CHECK-NEXT: stg %r1, 0(%r15)
; CHECK-NEXT: lgr %r11, %r15
; CHECK-NEXT: .cfi_def_cfa_register %r11
-; CHECK-NEXT: std %f8, 232(%r11) # 8-byte Folded Spill
-; CHECK-NEXT: std %f9, 224(%r11) # 8-byte Folded Spill
-; CHECK-NEXT: std %f10, 216(%r11) # 8-byte Folded Spill
-; CHECK-NEXT: std %f11, 208(%r11) # 8-byte Folded Spill
-; CHECK-NEXT: std %f12, 200(%r11) # 8-byte Folded Spill
-; CHECK-NEXT: std %f13, 192(%r11) # 8-byte Folded Spill
-; CHECK-NEXT: std %f14, 184(%r11) # 8-byte Folded Spill
-; CHECK-NEXT: std %f15, 176(%r11) # 8-byte Folded Spill
+; CHECK-NEXT: std %f8, 232(%r11) # 8-byte Spill
+; CHECK-NEXT: std %f9, 224(%r11) # 8-byte Spill
+; CHECK-NEXT: std %f10, 216(%r11) # 8-byte Spill
+; CHECK-NEXT: std %f11, 208(%r11) # 8-byte Spill
+; CHECK-NEXT: std %f12, 200(%r11) # 8-byte Spill
+; CHECK-NEXT: std %f13, 192(%r11) # 8-byte Spill
+; CHECK-NEXT: std %f14, 184(%r11) # 8-byte Spill
+; CHECK-NEXT: std %f15, 176(%r11) # 8-byte Spill
; CHECK-NEXT: .cfi_offset %f8, -168
; CHECK-NEXT: .cfi_offset %f9, -176
; CHECK-NEXT: .cfi_offset %f10, -184
@@ -132,14 +132,14 @@ define signext i32 @foo1() "backchain" "frame-pointer"="all" {
; CHECK-NEXT: .LBB1_2: # %entry
; CHECK-NEXT: lg %r1, 168(%r11)
; CHECK-NEXT: lgf %r2, 0(%r1)
-; CHECK-NEXT: ld %f8, 232(%r11) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f9, 224(%r11) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f10, 216(%r11) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f11, 208(%r11) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f12, 200(%r11) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f13, 192(%r11) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f14, 184(%r11) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f15, 176(%r11) # 8-byte Folded Reload
+; CHECK-NEXT: ld %f8, 232(%r11) # 8-byte Reload
+; CHECK-NEXT: ld %f9, 224(%r11) # 8-byte Reload
+; CHECK-NEXT: ld %f10, 216(%r11) # 8-byte Reload
+; CHECK-NEXT: ld %f11, 208(%r11) # 8-byte Reload
+; CHECK-NEXT: ld %f12, 200(%r11) # 8-byte Reload
+; CHECK-NEXT: ld %f13, 192(%r11) # 8-byte Reload
+; CHECK-NEXT: ld %f14, 184(%r11) # 8-byte Reload
+; CHECK-NEXT: ld %f15, 176(%r11) # 8-byte Reload
; CHECK-NEXT: lmg %r6, %r15, 288(%r11)
; CHECK-NEXT: br %r14
entry:
diff --git a/llvm/test/CodeGen/SystemZ/builtin-setjmp-spills.ll b/llvm/test/CodeGen/SystemZ/builtin-setjmp-spills.ll
index 65657ec9f1826..5626f45ac8bbb 100644
--- a/llvm/test/CodeGen/SystemZ/builtin-setjmp-spills.ll
+++ b/llvm/test/CodeGen/SystemZ/builtin-setjmp-spills.ll
@@ -49,14 +49,14 @@ define signext i32 @func() {
; CHECK-NEXT: .cfi_offset %r15, -40
; CHECK-NEXT: aghi %r15, -384
; CHECK-NEXT: .cfi_def_cfa_offset 544
-; CHECK-NEXT: std %f8, 376(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f9, 368(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f10, 360(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f11, 352(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f12, 344(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f13, 336(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f14, 328(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f15, 320(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: std %f8, 376(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f9, 368(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f10, 360(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f11, 352(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f12, 344(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f13, 336(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f14, 328(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f15, 320(%r15) # 8-byte Spill
; CHECK-NEXT: .cfi_offset %f8, -168
; CHECK-NEXT: .cfi_offset %f9, -176
; CHECK-NEXT: .cfi_offset %f10, -184
@@ -67,64 +67,64 @@ define signext i32 @func() {
; CHECK-NEXT: .cfi_offset %f15, -224
; CHECK-NEXT: lgrl %r1, t at GOT
; CHECK-NEXT: lgrl %r2, s at GOT
-; CHECK-NEXT: stg %r1, 312(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r1, 312(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r1), 1
; CHECK-NEXT: lgrl %r1, r at GOT
; CHECK-NEXT: lgrl %r3, q at GOT
-; CHECK-NEXT: stg %r2, 304(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r2, 304(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r2), 1
; CHECK-NEXT: lgrl %r2, p at GOT
-; CHECK-NEXT: stg %r1, 296(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r1, 296(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r1), 1
-; CHECK-NEXT: stg %r3, 288(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r3, 288(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r3), 1
; CHECK-NEXT: lgrl %r1, o at GOT
-; CHECK-NEXT: stg %r2, 280(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r2, 280(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r2), 1
; CHECK-NEXT: lgrl %r2, n at GOT
; CHECK-NEXT: lgrl %r3, m at GOT
-; CHECK-NEXT: stg %r1, 272(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r1, 272(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r1), 1
; CHECK-NEXT: lgrl %r1, l at GOT
-; CHECK-NEXT: stg %r2, 264(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r2, 264(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r2), 1
-; CHECK-NEXT: stg %r3, 256(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r3, 256(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r3), 1
; CHECK-NEXT: lgrl %r2, k at GOT
-; CHECK-NEXT: stg %r1, 248(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r1, 248(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r1), 1
; CHECK-NEXT: lgrl %r1, j at GOT
; CHECK-NEXT: lgrl %r3, i at GOT
-; CHECK-NEXT: stg %r2, 240(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r2, 240(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r2), 1
; CHECK-NEXT: lgrl %r2, h at GOT
-; CHECK-NEXT: stg %r1, 232(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r1, 232(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r1), 1
-; CHECK-NEXT: stg %r3, 224(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r3, 224(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r3), 1
; CHECK-NEXT: lgrl %r1, g at GOT
-; CHECK-NEXT: stg %r2, 216(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r2, 216(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r2), 1
; CHECK-NEXT: lgrl %r2, f at GOT
; CHECK-NEXT: lgrl %r3, e at GOT
-; CHECK-NEXT: stg %r1, 208(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r1, 208(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r1), 1
; CHECK-NEXT: lgrl %r1, d at GOT
-; CHECK-NEXT: stg %r2, 200(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r2, 200(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r2), 1
-; CHECK-NEXT: stg %r3, 192(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r3, 192(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r3), 1
; CHECK-NEXT: lgrl %r2, c at GOT
-; CHECK-NEXT: stg %r1, 184(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r1, 184(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r1), 1
; CHECK-NEXT: lgrl %r3, b at GOT
; CHECK-NEXT: lgrl %r4, a at GOT
-; CHECK-NEXT: stg %r2, 176(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r2, 176(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r2), 1
; CHECK-NEXT: lgrl %r1, buf at GOT
-; CHECK-NEXT: stg %r3, 168(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r3, 168(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r3), 1
-; CHECK-NEXT: stg %r4, 160(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r4, 160(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r4), 1
; CHECK-NEXT: larl %r0, .LBB0_2
; CHECK-NEXT: stg %r0, 8(%r1)
@@ -136,55 +136,55 @@ define signext i32 @func() {
; CHECK-NEXT: # %entry
; CHECK-NEXT: lhi %r0, 1
; CHECK-NEXT: .LBB0_3: # %entry
-; CHECK-NEXT: lg %r1, 160(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 160(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 168(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 168(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 176(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 176(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 184(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 184(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 192(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 192(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 200(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 200(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 208(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 208(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 216(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 216(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 224(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 224(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 232(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 232(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 240(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 240(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 248(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 248(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 256(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 256(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 264(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 264(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 272(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 272(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 280(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 280(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 288(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 288(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 296(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 296(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 304(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 304(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 312(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 312(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
; CHECK-NEXT: lgfr %r2, %r0
-; CHECK-NEXT: ld %f8, 376(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f9, 368(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f10, 360(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f11, 352(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f12, 344(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f13, 336(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f14, 328(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f15, 320(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: ld %f8, 376(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f9, 368(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f10, 360(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f11, 352(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f12, 344(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f13, 336(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f14, 328(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f15, 320(%r15) # 8-byte Reload
; CHECK-NEXT: lmg %r6, %r15, 432(%r15)
; CHECK-NEXT: br %r14
entry:
diff --git a/llvm/test/CodeGen/SystemZ/builtin-setjmp.ll b/llvm/test/CodeGen/SystemZ/builtin-setjmp.ll
index cfeba5a051ad1..37706c7bcd212 100644
--- a/llvm/test/CodeGen/SystemZ/builtin-setjmp.ll
+++ b/llvm/test/CodeGen/SystemZ/builtin-setjmp.ll
@@ -26,14 +26,14 @@ define void @foo() {
; CHECK-NEXT: .cfi_offset %r15, -40
; CHECK-NEXT: aghi %r15, -64
; CHECK-NEXT: .cfi_def_cfa_offset 224
-; CHECK-NEXT: std %f8, 56(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f9, 48(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f10, 40(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f11, 32(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f12, 24(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f13, 16(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f14, 8(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f15, 0(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: std %f8, 56(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f9, 48(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f10, 40(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f11, 32(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f12, 24(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f13, 16(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f14, 8(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f15, 0(%r15) # 8-byte Spill
; CHECK-NEXT: .cfi_offset %f8, -168
; CHECK-NEXT: .cfi_offset %f9, -176
; CHECK-NEXT: .cfi_offset %f10, -184
@@ -49,14 +49,14 @@ define void @foo() {
; CHECK-NEXT: .LBB0_1: # Block address taken
; CHECK-NEXT: # %entry
; CHECK-NEXT: .LBB0_2: # %entry
-; CHECK-NEXT: ld %f8, 56(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f9, 48(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f10, 40(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f11, 32(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f12, 24(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f13, 16(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f14, 8(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f15, 0(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: ld %f8, 56(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f9, 48(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f10, 40(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f11, 32(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f12, 24(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f13, 16(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f14, 8(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f15, 0(%r15) # 8-byte Reload
; CHECK-NEXT: lmg %r6, %r15, 112(%r15)
; CHECK-NEXT: br %r14
entry:
@@ -82,14 +82,14 @@ define void @foo1() "backchain" {
; CHECK-NEXT: aghi %r15, -64
; CHECK-NEXT: .cfi_def_cfa_offset 224
; CHECK-NEXT: stg %r1, 0(%r15)
-; CHECK-NEXT: std %f8, 56(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f9, 48(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f10, 40(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f11, 32(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f12, 24(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f13, 16(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f14, 8(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f15, 0(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: std %f8, 56(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f9, 48(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f10, 40(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f11, 32(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f12, 24(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f13, 16(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f14, 8(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f15, 0(%r15) # 8-byte Spill
; CHECK-NEXT: .cfi_offset %f8, -168
; CHECK-NEXT: .cfi_offset %f9, -176
; CHECK-NEXT: .cfi_offset %f10, -184
@@ -107,14 +107,14 @@ define void @foo1() "backchain" {
; CHECK-NEXT: .LBB1_1: # Block address taken
; CHECK-NEXT: # %entry
; CHECK-NEXT: .LBB1_2: # %entry
-; CHECK-NEXT: ld %f8, 56(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f9, 48(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f10, 40(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f11, 32(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f12, 24(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f13, 16(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f14, 8(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f15, 0(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: ld %f8, 56(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f9, 48(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f10, 40(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f11, 32(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f12, 24(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f13, 16(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f14, 8(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f15, 0(%r15) # 8-byte Reload
; CHECK-NEXT: lmg %r6, %r15, 112(%r15)
; CHECK-NEXT: br %r14
entry:
diff --git a/llvm/test/CodeGen/SystemZ/fmuladd-soft-float.ll b/llvm/test/CodeGen/SystemZ/fmuladd-soft-float.ll
index d0bfe74719f89..1447c576f48ae 100644
--- a/llvm/test/CodeGen/SystemZ/fmuladd-soft-float.ll
+++ b/llvm/test/CodeGen/SystemZ/fmuladd-soft-float.ll
@@ -101,9 +101,9 @@ define <4 x float> @fmuladd_contract_v4f32(<4 x float> %a, <4 x float> %b, <4 x
; SOFT-FLOAT-NEXT: aghi %r15, -176
; SOFT-FLOAT-NEXT: .cfi_def_cfa_offset 336
; SOFT-FLOAT-NEXT: llgf %r0, 388(%r15)
-; SOFT-FLOAT-NEXT: stg %r0, 168(%r15) # 8-byte Folded Spill
+; SOFT-FLOAT-NEXT: stg %r0, 168(%r15) # 8-byte Spill
; SOFT-FLOAT-NEXT: llgf %r0, 380(%r15)
-; SOFT-FLOAT-NEXT: stg %r0, 160(%r15) # 8-byte Folded Spill
+; SOFT-FLOAT-NEXT: stg %r0, 160(%r15) # 8-byte Spill
; SOFT-FLOAT-NEXT: llgf %r11, 372(%r15)
; SOFT-FLOAT-NEXT: llgf %r10, 364(%r15)
; SOFT-FLOAT-NEXT: llgf %r8, 340(%r15)
@@ -139,11 +139,11 @@ define <4 x float> @fmuladd_contract_v4f32(<4 x float> %a, <4 x float> %b, <4 x
; SOFT-FLOAT-NEXT: brasl %r14, __addsf3 at PLT
; SOFT-FLOAT-NEXT: lgr %r12, %r2
; SOFT-FLOAT-NEXT: lgr %r2, %r13
-; SOFT-FLOAT-NEXT: lg %r3, 160(%r15) # 8-byte Folded Reload
+; SOFT-FLOAT-NEXT: lg %r3, 160(%r15) # 8-byte Reload
; SOFT-FLOAT-NEXT: brasl %r14, __addsf3 at PLT
; SOFT-FLOAT-NEXT: lgr %r13, %r2
; SOFT-FLOAT-NEXT: lgr %r2, %r9
-; SOFT-FLOAT-NEXT: lg %r3, 168(%r15) # 8-byte Folded Reload
+; SOFT-FLOAT-NEXT: lg %r3, 168(%r15) # 8-byte Reload
; SOFT-FLOAT-NEXT: brasl %r14, __addsf3 at PLT
; SOFT-FLOAT-NEXT: lgr %r5, %r2
; SOFT-FLOAT-NEXT: lr %r2, %r10
@@ -203,15 +203,15 @@ define <4 x double> @fmuladd_contract_v4f64(<4 x double> %a, <4 x double> %b, <4
; SOFT-FLOAT-NEXT: brasl %r14, __adddf3 at PLT
; SOFT-FLOAT-NEXT: lgr %r10, %r2
; SOFT-FLOAT-NEXT: lgr %r2, %r12
-; SOFT-FLOAT-NEXT: lg %r3, 160(%r15) # 8-byte Folded Reload
+; SOFT-FLOAT-NEXT: lg %r3, 160(%r15) # 8-byte Reload
; SOFT-FLOAT-NEXT: brasl %r14, __adddf3 at PLT
; SOFT-FLOAT-NEXT: lgr %r12, %r2
; SOFT-FLOAT-NEXT: lgr %r2, %r13
-; SOFT-FLOAT-NEXT: lg %r3, 168(%r15) # 8-byte Folded Reload
+; SOFT-FLOAT-NEXT: lg %r3, 168(%r15) # 8-byte Reload
; SOFT-FLOAT-NEXT: brasl %r14, __adddf3 at PLT
; SOFT-FLOAT-NEXT: lgr %r13, %r2
; SOFT-FLOAT-NEXT: lgr %r2, %r11
-; SOFT-FLOAT-NEXT: lg %r3, 176(%r15) # 8-byte Folded Reload
+; SOFT-FLOAT-NEXT: lg %r3, 176(%r15) # 8-byte Reload
; SOFT-FLOAT-NEXT: brasl %r14, __adddf3 at PLT
; SOFT-FLOAT-NEXT: lgr %r5, %r2
; SOFT-FLOAT-NEXT: lgr %r2, %r10
diff --git a/llvm/test/CodeGen/SystemZ/foldmemop-imm-02.mir b/llvm/test/CodeGen/SystemZ/foldmemop-imm-02.mir
index 653bb42e1cad2..d5ebbaabdceba 100644
--- a/llvm/test/CodeGen/SystemZ/foldmemop-imm-02.mir
+++ b/llvm/test/CodeGen/SystemZ/foldmemop-imm-02.mir
@@ -125,7 +125,7 @@ body: |
# CHECK: mvhi 160(%r15), 0 # 4-byte Folded Spill
# CHECK: mvc 160(4,%r15), 0(%r2) # 4-byte Folded Spill
# CHECK-LABEL: .LBB2_2:
-# CHECK: l %r0, 160(%r15) # 4-byte Folded Reload
+# CHECK: l %r0, 160(%r15) # 4-byte Reload
# CHECK: clfi %r0, 65536
---
name: fun2
@@ -182,7 +182,7 @@ body: |
# CHECK: mvghi 160(%r15), 0 # 8-byte Folded Spill
# CHECK: mvc 160(8,%r15), 0(%r2) # 8-byte Folded Spill
# CHECK-LABEL: .LBB3_2:
-# CHECK: lg %r0, 160(%r15) # 8-byte Folded Reload
+# CHECK: lg %r0, 160(%r15) # 8-byte Reload
# CHECK: clgfi %r0, 65536
---
name: fun3
diff --git a/llvm/test/CodeGen/SystemZ/foldmemop-vec-cc.mir b/llvm/test/CodeGen/SystemZ/foldmemop-vec-cc.mir
index dbdd3a0a21bf1..5d0663c95e532 100644
--- a/llvm/test/CodeGen/SystemZ/foldmemop-vec-cc.mir
+++ b/llvm/test/CodeGen/SystemZ/foldmemop-vec-cc.mir
@@ -9,7 +9,7 @@
...
# CHECK-LABEL: fun0:
-# CHECK: ld %f1, 160(%r15) # 8-byte Folded Reload
+# CHECK: ld %f1, 160(%r15) # 8-byte Reload
# CHECK-NEXT: wfadb %f0, %f0, %f1
---
name: fun0
diff --git a/llvm/test/CodeGen/SystemZ/foldmemop-vec-cmp.mir b/llvm/test/CodeGen/SystemZ/foldmemop-vec-cmp.mir
index 1344809651ad7..86c23643c0acb 100644
--- a/llvm/test/CodeGen/SystemZ/foldmemop-vec-cmp.mir
+++ b/llvm/test/CodeGen/SystemZ/foldmemop-vec-cmp.mir
@@ -94,7 +94,7 @@ body: |
# CDB can't be used if one operand is a VR64 (and not FP64).
# CHECK-LABEL: fun2:
-# CHECK: ld %f0, 160(%r15) # 8-byte Folded Reload
+# CHECK: ld %f0, 160(%r15) # 8-byte Reload
# CHECK-NEXT: wfcdb %v16, %f0
---
name: fun2
@@ -204,7 +204,7 @@ body: |
# CEB can't be used if one operand is a VR32 (and not FP32).
# CHECK-LABEL: fun5:
-# CHECK: lde %f0, 164(%r15) # 4-byte Folded Reload
+# CHECK: lde %f0, 164(%r15) # 4-byte Reload
# CHECK-NEXT: wfcsb %v16, %f0
---
name: fun5
@@ -314,7 +314,7 @@ body: |
# KDB can't be used if one operand is a VR64 (and not FP64).
# CHECK-LABEL: fun8:
-# CHECK: ld %f0, 160(%r15) # 8-byte Folded Reload
+# CHECK: ld %f0, 160(%r15) # 8-byte Reload
# CHECK-NEXT: wfkdb %v16, %f0
---
name: fun8
@@ -424,7 +424,7 @@ body: |
# CEB can't be used if one operand is a VR32 (and not FP32).
# CHECK-LABEL: fun11:
-# CHECK: lde %f0, 164(%r15) # 4-byte Folded Reload
+# CHECK: lde %f0, 164(%r15) # 4-byte Reload
# CHECK-NEXT: wfksb %v16, %f0
---
name: fun11
diff --git a/llvm/test/CodeGen/SystemZ/foldmemop-vec-unary.mir b/llvm/test/CodeGen/SystemZ/foldmemop-vec-unary.mir
index e811cb9ddc993..86b3b0297454a 100644
--- a/llvm/test/CodeGen/SystemZ/foldmemop-vec-unary.mir
+++ b/llvm/test/CodeGen/SystemZ/foldmemop-vec-unary.mir
@@ -44,7 +44,7 @@ body: |
# LDEB can't be used if dst operand is a VR32 (and not FP32).
# CHECK-LABEL: fun1:
-# CHECK: lde %f0, 164(%r15) # 4-byte Folded Reload
+# CHECK: lde %f0, 164(%r15) # 4-byte Reload
# CHECK-NEXT: wldeb %v16, %f0
---
name: fun1
@@ -75,7 +75,7 @@ body: |
# Spilling the destination of an fp extension needs an extra store instruction.
# CHECK-LABEL: fun2:
# CHECK: ldebr %f0, %f0
-# CHECK-NEXT: std %f0, 160(%r15) # 8-byte Folded Spill
+# CHECK-NEXT: std %f0, 160(%r15) # 8-byte Spill
---
name: fun2
alignment: 16
@@ -130,7 +130,7 @@ body: |
# SQDB can't be used if dst operand is a VR64 (and not FP64).
# CHECK-LABEL: fun4:
-# CHECK: ld %f0, 160(%r15) # 8-byte Folded Reload
+# CHECK: ld %f0, 160(%r15) # 8-byte Reload
# CHECK-NEXT: wfsqdb %v16, %f0
---
name: fun4
@@ -187,7 +187,7 @@ body: |
# SQEB can't be used if dst operand is a VR32 (and not FP32).
# CHECK-LABEL: fun6:
-# CHECK: lde %f0, 164(%r15) # 4-byte Folded Reload
+# CHECK: lde %f0, 164(%r15) # 4-byte Reload
# CHECK-NEXT: wfsqsb %v16, %f0
---
name: fun6
diff --git a/llvm/test/CodeGen/SystemZ/fp-move-02.ll b/llvm/test/CodeGen/SystemZ/fp-move-02.ll
index 9df852dccbc82..7f7ac7cda83d3 100644
--- a/llvm/test/CodeGen/SystemZ/fp-move-02.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-move-02.ll
@@ -156,9 +156,9 @@ define void @f10(double %extra) {
; CHECK-NEXT: adb %f2, 0(%r1)
; CHECK-NEXT: ldr %f3, %f0
; CHECK-NEXT: adb %f3, 0(%r1)
-; CHECK-NEXT: std %f1, 176(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f2, 168(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f3, 160(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: std %f1, 176(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f2, 168(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f3, 160(%r15) # 8-byte Spill
; CHECK-NEXT: ldr %f1, %f0
; CHECK-NEXT: adb %f1, 0(%r1)
; CHECK-NEXT: ldr %f2, %f0
@@ -263,14 +263,14 @@ define void @f11(i64 %mask) {
; CHECK-NEXT: .cfi_offset %r15, -40
; CHECK-NEXT: aghi %r15, -240
; CHECK-NEXT: .cfi_def_cfa_offset 400
-; CHECK-NEXT: std %f8, 232(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f9, 224(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f10, 216(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f11, 208(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f12, 200(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f13, 192(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f14, 184(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f15, 176(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: std %f8, 232(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f9, 224(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f10, 216(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f11, 208(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f12, 200(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f13, 192(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f14, 184(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f15, 176(%r15) # 8-byte Spill
; CHECK-NEXT: .cfi_offset %f8, -168
; CHECK-NEXT: .cfi_offset %f9, -176
; CHECK-NEXT: .cfi_offset %f10, -184
@@ -286,8 +286,8 @@ define void @f11(i64 %mask) {
; CHECK-NEXT: ng %r3, 0(%r1)
; CHECK-NEXT: lgr %r4, %r2
; CHECK-NEXT: ng %r4, 0(%r1)
-; CHECK-NEXT: stg %r0, 168(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: stg %r3, 160(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r0, 168(%r15) # 8-byte Spill
+; CHECK-NEXT: stg %r3, 160(%r15) # 8-byte Spill
; CHECK-NEXT: lgr %r0, %r2
; CHECK-NEXT: ng %r0, 0(%r1)
; CHECK-NEXT: ldgr %f10, %r4
@@ -329,14 +329,14 @@ define void @f11(i64 %mask) {
; CHECK-NEXT: std %f1, 0(%r13)
; CHECK-NEXT: jlh .LBB10_1
; CHECK-NEXT: # %bb.2: # %exit
-; CHECK-NEXT: ld %f8, 232(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f9, 224(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f10, 216(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f11, 208(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f12, 200(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f13, 192(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f14, 184(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f15, 176(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: ld %f8, 232(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f9, 224(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f10, 216(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f11, 208(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f12, 200(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f13, 192(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f14, 184(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f15, 176(%r15) # 8-byte Reload
; CHECK-NEXT: lmg %r12, %r15, 336(%r15)
; CHECK-NEXT: br %r14
entry:
@@ -431,12 +431,12 @@ define void @f12() {
; CHECK-NEXT: agr %r9, %r2
; CHECK-NEXT: agr %r10, %r2
; CHECK-NEXT: agr %r11, %r2
-; CHECK-NEXT: lg %r0, 160(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r0, 160(%r15) # 8-byte Reload
; CHECK-NEXT: agr %r0, %r2
-; CHECK-NEXT: stg %r0, 160(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: lg %r0, 168(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: stg %r0, 160(%r15) # 8-byte Spill
+; CHECK-NEXT: lg %r0, 168(%r15) # 8-byte Reload
; CHECK-NEXT: agr %r0, %r2
-; CHECK-NEXT: stg %r0, 168(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r0, 168(%r15) # 8-byte Spill
; CHECK-NEXT: cgijlh %r2, 1, .LBB11_1
; CHECK-NEXT: # %bb.2: # %exit
; CHECK-NEXT: brasl %r14, foo@PLT
@@ -466,10 +466,10 @@ define void @f12() {
; CHECK-NEXT: ldgr %f1, %r11
; CHECK-NEXT: mdbr %f1, %f0
; CHECK-NEXT: std %f1, 0(%r1)
-; CHECK-NEXT: ld %f1, 160(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: ld %f1, 160(%r15) # 8-byte Reload
; CHECK-NEXT: mdbr %f1, %f0
; CHECK-NEXT: std %f1, 0(%r1)
-; CHECK-NEXT: ld %f1, 168(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: ld %f1, 168(%r15) # 8-byte Reload
; CHECK-NEXT: mdbr %f1, %f0
; CHECK-NEXT: std %f1, 0(%r1)
; CHECK-NEXT: brasl %r14, foo@PLT
@@ -554,14 +554,14 @@ define void @f13() {
; CHECK-NEXT: .cfi_offset %r15, -40
; CHECK-NEXT: aghi %r15, -240
; CHECK-NEXT: .cfi_def_cfa_offset 400
-; CHECK-NEXT: std %f8, 232(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f9, 224(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f10, 216(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f11, 208(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f12, 200(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f13, 192(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f14, 184(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f15, 176(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: std %f8, 232(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f9, 224(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f10, 216(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f11, 208(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f12, 200(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f13, 192(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f14, 184(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f15, 176(%r15) # 8-byte Spill
; CHECK-NEXT: .cfi_offset %f8, -168
; CHECK-NEXT: .cfi_offset %f9, -176
; CHECK-NEXT: .cfi_offset %f10, -184
@@ -579,8 +579,8 @@ define void @f13() {
; CHECK-NEXT: ldr %f12, %f8
; CHECK-NEXT: ldr %f11, %f8
; CHECK-NEXT: ldr %f10, %f8
-; CHECK-NEXT: std %f8, 160(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: std %f8, 160(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f8, 168(%r15) # 8-byte Spill
; CHECK-NEXT: .LBB12_1: # %loop
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: brasl %r14, bar@PLT
@@ -593,12 +593,12 @@ define void @f13() {
; CHECK-NEXT: cdb %f0, 0(%r13)
; CHECK-NEXT: mdbr %f11, %f0
; CHECK-NEXT: mdbr %f10, %f0
-; CHECK-NEXT: ld %f1, 160(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: ld %f1, 160(%r15) # 8-byte Reload
; CHECK-NEXT: mdbr %f1, %f0
-; CHECK-NEXT: std %f1, 160(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: ld %f1, 168(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: std %f1, 160(%r15) # 8-byte Spill
+; CHECK-NEXT: ld %f1, 168(%r15) # 8-byte Reload
; CHECK-NEXT: mdbr %f1, %f0
-; CHECK-NEXT: std %f1, 168(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: std %f1, 168(%r15) # 8-byte Spill
; CHECK-NEXT: jlh .LBB12_1
; CHECK-NEXT: # %bb.2: # %exit
; CHECK-NEXT: brasl %r14, foo@PLT
@@ -628,21 +628,21 @@ define void @f13() {
; CHECK-NEXT: lgdr %r2, %f10
; CHECK-NEXT: agr %r2, %r0
; CHECK-NEXT: stg %r2, 0(%r1)
-; CHECK-NEXT: lg %r2, 160(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r2, 160(%r15) # 8-byte Reload
; CHECK-NEXT: agr %r2, %r0
; CHECK-NEXT: stg %r2, 0(%r1)
-; CHECK-NEXT: lg %r2, 168(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r2, 168(%r15) # 8-byte Reload
; CHECK-NEXT: agr %r2, %r0
; CHECK-NEXT: stg %r2, 0(%r1)
; CHECK-NEXT: brasl %r14, foo@PLT
-; CHECK-NEXT: ld %f8, 232(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f9, 224(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f10, 216(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f11, 208(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f12, 200(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f13, 192(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f14, 184(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f15, 176(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: ld %f8, 232(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f9, 224(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f10, 216(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f11, 208(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f12, 200(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f13, 192(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f14, 184(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f15, 176(%r15) # 8-byte Reload
; CHECK-NEXT: lmg %r13, %r15, 344(%r15)
; CHECK-NEXT: br %r14
entry:
diff --git a/llvm/test/CodeGen/SystemZ/frame-22.ll b/llvm/test/CodeGen/SystemZ/frame-22.ll
index fd4e3fec508a9..80e3a2f5ce930 100644
--- a/llvm/test/CodeGen/SystemZ/frame-22.ll
+++ b/llvm/test/CodeGen/SystemZ/frame-22.ll
@@ -8,11 +8,11 @@ define void @f1() #0 {
; CHECK: stmg %r12, %r15, 128(%r15)
; CHECK-NEXT: .cfi_offset %r12, -32
; CHECK-NEXT: .cfi_offset %r15, -8
-; CHECK-NEXT: std %f8, 120(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: std %f8, 120(%r15) # 8-byte Spill
; CHECK-NEXT: .cfi_offset %f8, -40
; CHECK-NEXT: #APP
; CHECK-NEXT: #NO_APP
-; CHECK-NEXT: ld %f8, 120(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: ld %f8, 120(%r15) # 8-byte Reload
; CHECK-NEXT: lmg %r12, %r15, 128(%r15)
; CHECK-NEXT: br %r14
call void asm sideeffect "", "~{f8},~{r12}"() nounwind
@@ -25,14 +25,14 @@ define anyregcc void @f2() #0 {
; CHECK: stmg %r3, %r15, 56(%r15)
; CHECK-NEXT: .cfi_offset %r3, -104
; CHECK-NEXT: .cfi_offset %r15, -8
-; CHECK-NEXT: std %f0, 48(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f1, 40(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: std %f0, 48(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f1, 40(%r15) # 8-byte Spill
; CHECK-NEXT: .cfi_offset %f0, -112
; CHECK-NEXT: .cfi_offset %f1, -120
; CHECK-NEXT: #APP
; CHECK-NEXT: #NO_APP
-; CHECK-NEXT: ld %f0, 48(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f1, 40(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: ld %f0, 48(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f1, 40(%r15) # 8-byte Reload
; CHECK-NEXT: lmg %r3, %r15, 56(%r15)
; CHECK-NEXT: br %r14
call void asm sideeffect "", "~{f0},~{f1},~{r3}"() nounwind
@@ -43,14 +43,14 @@ define anyregcc void @f2() #0 {
define i64 @f3(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f,
double %A, double %B, double %C, double %D, double %E) #0 {
; CHECK-LABEL: f3:
-; CHECK: std %f8, 152(%r15) # 8-byte Folded Spill
+; CHECK: std %f8, 152(%r15) # 8-byte Spill
; CHECK-NEXT: .cfi_offset %f8, -8
; CHECK-NEXT: ld %f0, 168(%r15)
; CHECK-NEXT: cgdbr %r2, 5, %f0
; CHECK-NEXT: ag %r2, 160(%r15)
; CHECK-NEXT: #APP
; CHECK-NEXT: #NO_APP
-; CHECK-NEXT: ld %f8, 152(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: ld %f8, 152(%r15) # 8-byte Reload
; CHECK-NEXT: br %r14
call void asm sideeffect "", "~{f8}"() nounwind
%Ei = fptosi double %E to i64
@@ -67,7 +67,7 @@ define i64 @f4() #0 {
; CHECK-NEXT: .cfi_offset %r15, -8
; CHECK-NEXT: aghi %r15, -104
; CHECK-NEXT: .cfi_def_cfa_offset 264
-; CHECK-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: std %f8, 176(%r15) # 8-byte Spill
; CHECK-NEXT: .cfi_offset %f8, -88
; CHECK-NEXT: #APP
; CHECK-NEXT: #NO_APP
@@ -75,7 +75,7 @@ define i64 @f4() #0 {
; CHECK-NEXT: stg %r0, 168(%r15)
; CHECK: mvghi 160(%r15), 6
; CHECK-NEXT: brasl %r14, f3@PLT
-; CHECK-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: ld %f8, 176(%r15) # 8-byte Reload
; CHECK-NEXT: lmg %r6, %r15, 184(%r15)
; CHECK-NEXT: br %r14
call void asm sideeffect "", "~{f8}"() nounwind
diff --git a/llvm/test/CodeGen/SystemZ/int-uadd-03.ll b/llvm/test/CodeGen/SystemZ/int-uadd-03.ll
index 4ecbb95b639b1..95b97ac854ec4 100644
--- a/llvm/test/CodeGen/SystemZ/int-uadd-03.ll
+++ b/llvm/test/CodeGen/SystemZ/int-uadd-03.ll
@@ -244,9 +244,9 @@ define zeroext i1 @f11(ptr %ptr0) {
; CHECK-NEXT: a %r11, 56(%r2)
; CHECK-NEXT: lhi %r1, 100
; CHECK-NEXT: a %r1, 64(%r2)
-; CHECK-NEXT: st %r1, 160(%r15) # 4-byte Folded Spill
+; CHECK-NEXT: st %r1, 160(%r15) # 4-byte Spill
; CHECK-NEXT: a %r0, 72(%r2)
-; CHECK-NEXT: st %r0, 164(%r15) # 4-byte Folded Spill
+; CHECK-NEXT: st %r0, 164(%r15) # 4-byte Spill
; CHECK-NEXT: st %r12, 0(%r2)
; CHECK-NEXT: st %r13, 8(%r2)
; CHECK-NEXT: st %r6, 16(%r2)
diff --git a/llvm/test/CodeGen/SystemZ/int-usub-03.ll b/llvm/test/CodeGen/SystemZ/int-usub-03.ll
index 83c3db4199255..af2abf856ff29 100644
--- a/llvm/test/CodeGen/SystemZ/int-usub-03.ll
+++ b/llvm/test/CodeGen/SystemZ/int-usub-03.ll
@@ -252,9 +252,9 @@ define zeroext i1 @f11(ptr %ptr0) {
; CHECK-NEXT: a %r11, 56(%r2)
; CHECK-NEXT: lhi %r1, 100
; CHECK-NEXT: a %r1, 64(%r2)
-; CHECK-NEXT: st %r1, 160(%r15) # 4-byte Folded Spill
+; CHECK-NEXT: st %r1, 160(%r15) # 4-byte Spill
; CHECK-NEXT: a %r0, 72(%r2)
-; CHECK-NEXT: st %r0, 164(%r15) # 4-byte Folded Spill
+; CHECK-NEXT: st %r0, 164(%r15) # 4-byte Spill
; CHECK-NEXT: st %r12, 0(%r2)
; CHECK-NEXT: st %r13, 8(%r2)
; CHECK-NEXT: st %r6, 16(%r2)
diff --git a/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll
index a3e453de913fe..e0818ea3da294 100644
--- a/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll
@@ -226,8 +226,8 @@ define <2 x double> @constrained_vector_frem_v2f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: larl %r1, .LCPI6_0
@@ -243,8 +243,8 @@ define <2 x double> @constrained_vector_frem_v2f64() #0 {
; S390X-NEXT: ldr %f2, %f8
; S390X-NEXT: brasl %r14, fmod@PLT
; S390X-NEXT: ldr %f2, %f9
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -255,7 +255,7 @@ define <2 x double> @constrained_vector_frem_v2f64() #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -184
; SZ13-NEXT: .cfi_def_cfa_offset 344
-; SZ13-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 176(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: larl %r1, .LCPI6_0
; SZ13-NEXT: ld %f8, 0(%r1)
@@ -268,7 +268,7 @@ define <2 x double> @constrained_vector_frem_v2f64() #0 {
; SZ13-NEXT: ldr %f2, %f8
; SZ13-NEXT: brasl %r14, fmod@PLT
; SZ13-NEXT: vl %v1, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 176(%r15) # 8-byte Reload
; SZ13-NEXT: # kill: def $f0d killed $f0d def $v0
; SZ13-NEXT: vmrhg %v24, %v0, %v1
; SZ13-NEXT: lmg %r14, %r15, 296(%r15)
@@ -290,9 +290,9 @@ define <3 x float> @constrained_vector_frem_v3f32() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -316,9 +316,9 @@ define <3 x float> @constrained_vector_frem_v3f32() #0 {
; S390X-NEXT: brasl %r14, fmodf@PLT
; S390X-NEXT: ler %f2, %f10
; S390X-NEXT: ler %f4, %f9
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -329,7 +329,7 @@ define <3 x float> @constrained_vector_frem_v3f32() #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: larl %r1, .LCPI7_0
; SZ13-NEXT: lde %f0, 0(%r1)
@@ -351,7 +351,7 @@ define <3 x float> @constrained_vector_frem_v3f32() #0 {
; SZ13-NEXT: # kill: def $f0s killed $f0s def $v0
; SZ13-NEXT: vmrhf %v0, %v1, %v0
; SZ13-NEXT: vl %v1, 176(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vrepf %v1, %v1, 0
; SZ13-NEXT: vmrhg %v24, %v0, %v1
; SZ13-NEXT: lmg %r14, %r15, 312(%r15)
@@ -374,9 +374,9 @@ define void @constrained_vector_frem_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -402,9 +402,9 @@ define void @constrained_vector_frem_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -416,7 +416,7 @@ define void @constrained_vector_frem_v3f64(ptr %a) #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: vl %v2, 0(%r2), 4
; SZ13-NEXT: ld %f8, 16(%r2)
@@ -442,7 +442,7 @@ define void @constrained_vector_frem_v3f64(ptr %a) #0 {
; SZ13-NEXT: brasl %r14, fmod@PLT
; SZ13-NEXT: std %f0, 16(%r13)
; SZ13-NEXT: vl %v0, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vst %v0, 0(%r13), 4
; SZ13-NEXT: lmg %r13, %r15, 304(%r15)
; SZ13-NEXT: br %r14
@@ -465,10 +465,10 @@ define <4 x double> @constrained_vector_frem_v4f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -192
; S390X-NEXT: .cfi_def_cfa_offset 352
-; S390X-NEXT: std %f8, 184(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f11, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 184(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f11, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -500,10 +500,10 @@ define <4 x double> @constrained_vector_frem_v4f64() #0 {
; S390X-NEXT: ldr %f2, %f11
; S390X-NEXT: ldr %f4, %f10
; S390X-NEXT: ldr %f6, %f9
-; S390X-NEXT: ld %f8, 184(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f11, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 184(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f11, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 304(%r15)
; S390X-NEXT: br %r14
;
@@ -514,7 +514,7 @@ define <4 x double> @constrained_vector_frem_v4f64() #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: larl %r1, .LCPI9_0
; SZ13-NEXT: ld %f8, 0(%r1)
@@ -542,7 +542,7 @@ define <4 x double> @constrained_vector_frem_v4f64() #0 {
; SZ13-NEXT: brasl %r14, fmod@PLT
; SZ13-NEXT: vl %v1, 176(%r15), 3 # 16-byte Folded Reload
; SZ13-NEXT: vl %v24, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: # kill: def $f0d killed $f0d def $v0
; SZ13-NEXT: vmrhg %v26, %v0, %v1
; SZ13-NEXT: lmg %r14, %r15, 312(%r15)
@@ -1252,8 +1252,8 @@ define <2 x double> @constrained_vector_pow_v2f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: larl %r1, .LCPI31_0
@@ -1269,8 +1269,8 @@ define <2 x double> @constrained_vector_pow_v2f64() #0 {
; S390X-NEXT: ldr %f2, %f8
; S390X-NEXT: brasl %r14, pow@PLT
; S390X-NEXT: ldr %f2, %f9
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -1281,7 +1281,7 @@ define <2 x double> @constrained_vector_pow_v2f64() #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -184
; SZ13-NEXT: .cfi_def_cfa_offset 344
-; SZ13-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 176(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: larl %r1, .LCPI31_0
; SZ13-NEXT: ld %f0, 0(%r1)
@@ -1296,7 +1296,7 @@ define <2 x double> @constrained_vector_pow_v2f64() #0 {
; SZ13-NEXT: ldr %f2, %f8
; SZ13-NEXT: brasl %r14, pow@PLT
; SZ13-NEXT: vl %v1, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 176(%r15) # 8-byte Reload
; SZ13-NEXT: # kill: def $f0d killed $f0d def $v0
; SZ13-NEXT: vmrhg %v24, %v0, %v1
; SZ13-NEXT: lmg %r14, %r15, 296(%r15)
@@ -1318,9 +1318,9 @@ define <3 x float> @constrained_vector_pow_v3f32() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -1344,9 +1344,9 @@ define <3 x float> @constrained_vector_pow_v3f32() #0 {
; S390X-NEXT: brasl %r14, powf@PLT
; S390X-NEXT: ler %f2, %f10
; S390X-NEXT: ler %f4, %f9
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -1357,7 +1357,7 @@ define <3 x float> @constrained_vector_pow_v3f32() #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: larl %r1, .LCPI32_0
; SZ13-NEXT: lde %f0, 0(%r1)
@@ -1381,7 +1381,7 @@ define <3 x float> @constrained_vector_pow_v3f32() #0 {
; SZ13-NEXT: # kill: def $f0s killed $f0s def $v0
; SZ13-NEXT: vmrhf %v0, %v1, %v0
; SZ13-NEXT: vl %v1, 176(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vrepf %v1, %v1, 0
; SZ13-NEXT: vmrhg %v24, %v0, %v1
; SZ13-NEXT: lmg %r14, %r15, 312(%r15)
@@ -1404,10 +1404,10 @@ define void @constrained_vector_pow_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -192
; S390X-NEXT: .cfi_def_cfa_offset 352
-; S390X-NEXT: std %f8, 184(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f11, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 184(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f11, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -1431,10 +1431,10 @@ define void @constrained_vector_pow_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f10, 8(%r13)
; S390X-NEXT: std %f11, 16(%r13)
-; S390X-NEXT: ld %f8, 184(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f11, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 184(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f11, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -1446,8 +1446,8 @@ define void @constrained_vector_pow_v3f64(ptr %a) #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -208
; SZ13-NEXT: .cfi_def_cfa_offset 368
-; SZ13-NEXT: std %f8, 200(%r15) # 8-byte Folded Spill
-; SZ13-NEXT: std %f9, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 200(%r15) # 8-byte Spill
+; SZ13-NEXT: std %f9, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: .cfi_offset %f9, -176
; SZ13-NEXT: larl %r1, .LCPI33_0
@@ -1475,8 +1475,8 @@ define void @constrained_vector_pow_v3f64(ptr %a) #0 {
; SZ13-NEXT: brasl %r14, pow@PLT
; SZ13-NEXT: std %f0, 16(%r13)
; SZ13-NEXT: vl %v0, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 200(%r15) # 8-byte Folded Reload
-; SZ13-NEXT: ld %f9, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 200(%r15) # 8-byte Reload
+; SZ13-NEXT: ld %f9, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vst %v0, 0(%r13), 4
; SZ13-NEXT: lmg %r13, %r15, 312(%r15)
; SZ13-NEXT: br %r14
@@ -1499,10 +1499,10 @@ define <4 x double> @constrained_vector_pow_v4f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -192
; S390X-NEXT: .cfi_def_cfa_offset 352
-; S390X-NEXT: std %f8, 184(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f11, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 184(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f11, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -1534,10 +1534,10 @@ define <4 x double> @constrained_vector_pow_v4f64() #0 {
; S390X-NEXT: ldr %f2, %f11
; S390X-NEXT: ldr %f4, %f10
; S390X-NEXT: ldr %f6, %f9
-; S390X-NEXT: ld %f8, 184(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f11, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 184(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f11, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 304(%r15)
; S390X-NEXT: br %r14
;
@@ -1548,7 +1548,7 @@ define <4 x double> @constrained_vector_pow_v4f64() #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: larl %r1, .LCPI34_0
; SZ13-NEXT: ld %f0, 0(%r1)
@@ -1578,7 +1578,7 @@ define <4 x double> @constrained_vector_pow_v4f64() #0 {
; SZ13-NEXT: brasl %r14, pow at PLT
; SZ13-NEXT: vl %v1, 176(%r15), 3 # 16-byte Folded Reload
; SZ13-NEXT: vl %v24, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: # kill: def $f0d killed $f0d def $v0
; SZ13-NEXT: vmrhg %v26, %v0, %v1
; SZ13-NEXT: lmg %r14, %r15, 312(%r15)
@@ -1641,7 +1641,7 @@ define <2 x double> @constrained_vector_powi_v2f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -168
; S390X-NEXT: .cfi_def_cfa_offset 328
-; S390X-NEXT: std %f8, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: larl %r1, .LCPI36_0
; S390X-NEXT: ld %f0, 0(%r1)
@@ -1654,7 +1654,7 @@ define <2 x double> @constrained_vector_powi_v2f64() #0 {
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, __powidf2 at PLT
; S390X-NEXT: ldr %f2, %f8
-; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 280(%r15)
; S390X-NEXT: br %r14
;
@@ -1697,8 +1697,8 @@ define <3 x float> @constrained_vector_powi_v3f32() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: larl %r1, .LCPI37_0
@@ -1719,8 +1719,8 @@ define <3 x float> @constrained_vector_powi_v3f32() #0 {
; S390X-NEXT: brasl %r14, __powisf2 at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f8
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -1773,8 +1773,8 @@ define void @constrained_vector_powi_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: larl %r1, .LCPI38_0
@@ -1797,8 +1797,8 @@ define void @constrained_vector_powi_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 16(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f8, 0(%r13)
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 280(%r15)
; S390X-NEXT: br %r14
;
@@ -1853,9 +1853,9 @@ define <4 x double> @constrained_vector_powi_v4f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -1884,9 +1884,9 @@ define <4 x double> @constrained_vector_powi_v4f64() #0 {
; S390X-NEXT: ldr %f2, %f10
; S390X-NEXT: ldr %f4, %f9
; S390X-NEXT: ldr %f6, %f8
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -1981,7 +1981,7 @@ define <2 x double> @constrained_vector_sin_v2f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -168
; S390X-NEXT: .cfi_def_cfa_offset 328
-; S390X-NEXT: std %f8, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: larl %r1, .LCPI41_0
; S390X-NEXT: ld %f0, 0(%r1)
@@ -1992,7 +1992,7 @@ define <2 x double> @constrained_vector_sin_v2f64() #0 {
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, sin at PLT
; S390X-NEXT: ldr %f2, %f8
-; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 280(%r15)
; S390X-NEXT: br %r14
;
@@ -2032,8 +2032,8 @@ define <3 x float> @constrained_vector_sin_v3f32() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: larl %r1, .LCPI42_0
@@ -2051,8 +2051,8 @@ define <3 x float> @constrained_vector_sin_v3f32() #0 {
; S390X-NEXT: brasl %r14, sinf at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f8
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -2101,9 +2101,9 @@ define void @constrained_vector_sin_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -2121,9 +2121,9 @@ define void @constrained_vector_sin_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -2135,7 +2135,7 @@ define void @constrained_vector_sin_v3f64(ptr %a) #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: vl %v0, 0(%r2), 4
; SZ13-NEXT: ld %f8, 16(%r2)
@@ -2157,7 +2157,7 @@ define void @constrained_vector_sin_v3f64(ptr %a) #0 {
; SZ13-NEXT: brasl %r14, sin at PLT
; SZ13-NEXT: std %f0, 16(%r13)
; SZ13-NEXT: vl %v0, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vst %v0, 0(%r13), 4
; SZ13-NEXT: lmg %r13, %r15, 304(%r15)
; SZ13-NEXT: br %r14
@@ -2179,9 +2179,9 @@ define <4 x double> @constrained_vector_sin_v4f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -2206,9 +2206,9 @@ define <4 x double> @constrained_vector_sin_v4f64() #0 {
; S390X-NEXT: ldr %f2, %f10
; S390X-NEXT: ldr %f4, %f9
; S390X-NEXT: ldr %f6, %f8
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -2298,7 +2298,7 @@ define <2 x double> @constrained_vector_cos_v2f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -168
; S390X-NEXT: .cfi_def_cfa_offset 328
-; S390X-NEXT: std %f8, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: larl %r1, .LCPI46_0
; S390X-NEXT: ld %f0, 0(%r1)
@@ -2309,7 +2309,7 @@ define <2 x double> @constrained_vector_cos_v2f64() #0 {
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, cos at PLT
; S390X-NEXT: ldr %f2, %f8
-; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 280(%r15)
; S390X-NEXT: br %r14
;
@@ -2349,8 +2349,8 @@ define <3 x float> @constrained_vector_cos_v3f32() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: larl %r1, .LCPI47_0
@@ -2368,8 +2368,8 @@ define <3 x float> @constrained_vector_cos_v3f32() #0 {
; S390X-NEXT: brasl %r14, cosf at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f8
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -2418,9 +2418,9 @@ define void @constrained_vector_cos_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -2438,9 +2438,9 @@ define void @constrained_vector_cos_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -2452,7 +2452,7 @@ define void @constrained_vector_cos_v3f64(ptr %a) #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: vl %v0, 0(%r2), 4
; SZ13-NEXT: ld %f8, 16(%r2)
@@ -2474,7 +2474,7 @@ define void @constrained_vector_cos_v3f64(ptr %a) #0 {
; SZ13-NEXT: brasl %r14, cos at PLT
; SZ13-NEXT: std %f0, 16(%r13)
; SZ13-NEXT: vl %v0, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vst %v0, 0(%r13), 4
; SZ13-NEXT: lmg %r13, %r15, 304(%r15)
; SZ13-NEXT: br %r14
@@ -2496,9 +2496,9 @@ define <4 x double> @constrained_vector_cos_v4f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -2523,9 +2523,9 @@ define <4 x double> @constrained_vector_cos_v4f64() #0 {
; S390X-NEXT: ldr %f2, %f10
; S390X-NEXT: ldr %f4, %f9
; S390X-NEXT: ldr %f6, %f8
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -2615,7 +2615,7 @@ define <2 x double> @constrained_vector_exp_v2f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -168
; S390X-NEXT: .cfi_def_cfa_offset 328
-; S390X-NEXT: std %f8, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: larl %r1, .LCPI51_0
; S390X-NEXT: ld %f0, 0(%r1)
@@ -2626,7 +2626,7 @@ define <2 x double> @constrained_vector_exp_v2f64() #0 {
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, exp at PLT
; S390X-NEXT: ldr %f2, %f8
-; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 280(%r15)
; S390X-NEXT: br %r14
;
@@ -2666,8 +2666,8 @@ define <3 x float> @constrained_vector_exp_v3f32() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: larl %r1, .LCPI52_0
@@ -2685,8 +2685,8 @@ define <3 x float> @constrained_vector_exp_v3f32() #0 {
; S390X-NEXT: brasl %r14, expf at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f8
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -2735,9 +2735,9 @@ define void @constrained_vector_exp_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -2755,9 +2755,9 @@ define void @constrained_vector_exp_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -2769,7 +2769,7 @@ define void @constrained_vector_exp_v3f64(ptr %a) #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: vl %v0, 0(%r2), 4
; SZ13-NEXT: ld %f8, 16(%r2)
@@ -2791,7 +2791,7 @@ define void @constrained_vector_exp_v3f64(ptr %a) #0 {
; SZ13-NEXT: brasl %r14, exp at PLT
; SZ13-NEXT: std %f0, 16(%r13)
; SZ13-NEXT: vl %v0, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vst %v0, 0(%r13), 4
; SZ13-NEXT: lmg %r13, %r15, 304(%r15)
; SZ13-NEXT: br %r14
@@ -2813,9 +2813,9 @@ define <4 x double> @constrained_vector_exp_v4f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -2840,9 +2840,9 @@ define <4 x double> @constrained_vector_exp_v4f64() #0 {
; S390X-NEXT: ldr %f2, %f10
; S390X-NEXT: ldr %f4, %f9
; S390X-NEXT: ldr %f6, %f8
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -2932,7 +2932,7 @@ define <2 x double> @constrained_vector_exp2_v2f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -168
; S390X-NEXT: .cfi_def_cfa_offset 328
-; S390X-NEXT: std %f8, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: larl %r1, .LCPI56_0
; S390X-NEXT: ld %f0, 0(%r1)
@@ -2943,7 +2943,7 @@ define <2 x double> @constrained_vector_exp2_v2f64() #0 {
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, exp2 at PLT
; S390X-NEXT: ldr %f2, %f8
-; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 280(%r15)
; S390X-NEXT: br %r14
;
@@ -2983,8 +2983,8 @@ define <3 x float> @constrained_vector_exp2_v3f32() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: larl %r1, .LCPI57_0
@@ -3002,8 +3002,8 @@ define <3 x float> @constrained_vector_exp2_v3f32() #0 {
; S390X-NEXT: brasl %r14, exp2f at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f8
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -3052,9 +3052,9 @@ define void @constrained_vector_exp2_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -3072,9 +3072,9 @@ define void @constrained_vector_exp2_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -3086,7 +3086,7 @@ define void @constrained_vector_exp2_v3f64(ptr %a) #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: vl %v0, 0(%r2), 4
; SZ13-NEXT: ld %f8, 16(%r2)
@@ -3108,7 +3108,7 @@ define void @constrained_vector_exp2_v3f64(ptr %a) #0 {
; SZ13-NEXT: brasl %r14, exp2 at PLT
; SZ13-NEXT: std %f0, 16(%r13)
; SZ13-NEXT: vl %v0, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vst %v0, 0(%r13), 4
; SZ13-NEXT: lmg %r13, %r15, 304(%r15)
; SZ13-NEXT: br %r14
@@ -3130,9 +3130,9 @@ define <4 x double> @constrained_vector_exp2_v4f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -3157,9 +3157,9 @@ define <4 x double> @constrained_vector_exp2_v4f64() #0 {
; S390X-NEXT: ldr %f2, %f10
; S390X-NEXT: ldr %f4, %f9
; S390X-NEXT: ldr %f6, %f8
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -3249,7 +3249,7 @@ define <2 x double> @constrained_vector_log_v2f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -168
; S390X-NEXT: .cfi_def_cfa_offset 328
-; S390X-NEXT: std %f8, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: larl %r1, .LCPI61_0
; S390X-NEXT: ld %f0, 0(%r1)
@@ -3260,7 +3260,7 @@ define <2 x double> @constrained_vector_log_v2f64() #0 {
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, log at PLT
; S390X-NEXT: ldr %f2, %f8
-; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 280(%r15)
; S390X-NEXT: br %r14
;
@@ -3300,8 +3300,8 @@ define <3 x float> @constrained_vector_log_v3f32() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: larl %r1, .LCPI62_0
@@ -3319,8 +3319,8 @@ define <3 x float> @constrained_vector_log_v3f32() #0 {
; S390X-NEXT: brasl %r14, logf at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f8
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -3369,9 +3369,9 @@ define void @constrained_vector_log_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -3389,9 +3389,9 @@ define void @constrained_vector_log_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -3403,7 +3403,7 @@ define void @constrained_vector_log_v3f64(ptr %a) #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: vl %v0, 0(%r2), 4
; SZ13-NEXT: ld %f8, 16(%r2)
@@ -3425,7 +3425,7 @@ define void @constrained_vector_log_v3f64(ptr %a) #0 {
; SZ13-NEXT: brasl %r14, log at PLT
; SZ13-NEXT: std %f0, 16(%r13)
; SZ13-NEXT: vl %v0, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vst %v0, 0(%r13), 4
; SZ13-NEXT: lmg %r13, %r15, 304(%r15)
; SZ13-NEXT: br %r14
@@ -3447,9 +3447,9 @@ define <4 x double> @constrained_vector_log_v4f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -3474,9 +3474,9 @@ define <4 x double> @constrained_vector_log_v4f64() #0 {
; S390X-NEXT: ldr %f2, %f10
; S390X-NEXT: ldr %f4, %f9
; S390X-NEXT: ldr %f6, %f8
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -3566,7 +3566,7 @@ define <2 x double> @constrained_vector_log10_v2f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -168
; S390X-NEXT: .cfi_def_cfa_offset 328
-; S390X-NEXT: std %f8, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: larl %r1, .LCPI66_0
; S390X-NEXT: ld %f0, 0(%r1)
@@ -3577,7 +3577,7 @@ define <2 x double> @constrained_vector_log10_v2f64() #0 {
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, log10 at PLT
; S390X-NEXT: ldr %f2, %f8
-; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 280(%r15)
; S390X-NEXT: br %r14
;
@@ -3617,8 +3617,8 @@ define <3 x float> @constrained_vector_log10_v3f32() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: larl %r1, .LCPI67_0
@@ -3636,8 +3636,8 @@ define <3 x float> @constrained_vector_log10_v3f32() #0 {
; S390X-NEXT: brasl %r14, log10f at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f8
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -3686,9 +3686,9 @@ define void @constrained_vector_log10_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -3706,9 +3706,9 @@ define void @constrained_vector_log10_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -3720,7 +3720,7 @@ define void @constrained_vector_log10_v3f64(ptr %a) #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: vl %v0, 0(%r2), 4
; SZ13-NEXT: ld %f8, 16(%r2)
@@ -3742,7 +3742,7 @@ define void @constrained_vector_log10_v3f64(ptr %a) #0 {
; SZ13-NEXT: brasl %r14, log10 at PLT
; SZ13-NEXT: std %f0, 16(%r13)
; SZ13-NEXT: vl %v0, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vst %v0, 0(%r13), 4
; SZ13-NEXT: lmg %r13, %r15, 304(%r15)
; SZ13-NEXT: br %r14
@@ -3764,9 +3764,9 @@ define <4 x double> @constrained_vector_log10_v4f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -3791,9 +3791,9 @@ define <4 x double> @constrained_vector_log10_v4f64() #0 {
; S390X-NEXT: ldr %f2, %f10
; S390X-NEXT: ldr %f4, %f9
; S390X-NEXT: ldr %f6, %f8
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -3883,7 +3883,7 @@ define <2 x double> @constrained_vector_log2_v2f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -168
; S390X-NEXT: .cfi_def_cfa_offset 328
-; S390X-NEXT: std %f8, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: larl %r1, .LCPI71_0
; S390X-NEXT: ld %f0, 0(%r1)
@@ -3894,7 +3894,7 @@ define <2 x double> @constrained_vector_log2_v2f64() #0 {
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, log2 at PLT
; S390X-NEXT: ldr %f2, %f8
-; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 280(%r15)
; S390X-NEXT: br %r14
;
@@ -3934,8 +3934,8 @@ define <3 x float> @constrained_vector_log2_v3f32() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: larl %r1, .LCPI72_0
@@ -3953,8 +3953,8 @@ define <3 x float> @constrained_vector_log2_v3f32() #0 {
; S390X-NEXT: brasl %r14, log2f at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f8
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -4003,9 +4003,9 @@ define void @constrained_vector_log2_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -4023,9 +4023,9 @@ define void @constrained_vector_log2_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -4037,7 +4037,7 @@ define void @constrained_vector_log2_v3f64(ptr %a) #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: vl %v0, 0(%r2), 4
; SZ13-NEXT: ld %f8, 16(%r2)
@@ -4059,7 +4059,7 @@ define void @constrained_vector_log2_v3f64(ptr %a) #0 {
; SZ13-NEXT: brasl %r14, log2 at PLT
; SZ13-NEXT: std %f0, 16(%r13)
; SZ13-NEXT: vl %v0, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vst %v0, 0(%r13), 4
; SZ13-NEXT: lmg %r13, %r15, 304(%r15)
; SZ13-NEXT: br %r14
@@ -4081,9 +4081,9 @@ define <4 x double> @constrained_vector_log2_v4f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -4108,9 +4108,9 @@ define <4 x double> @constrained_vector_log2_v4f64() #0 {
; S390X-NEXT: ldr %f2, %f10
; S390X-NEXT: ldr %f4, %f9
; S390X-NEXT: ldr %f6, %f8
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -4334,8 +4334,8 @@ define <2 x double> @constrained_vector_nearbyint_v2f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: ld %f0, 8(%r2)
@@ -4345,8 +4345,8 @@ define <2 x double> @constrained_vector_nearbyint_v2f64(ptr %a) #0 {
; S390X-NEXT: ldr %f0, %f8
; S390X-NEXT: brasl %r14, nearbyint at PLT
; S390X-NEXT: ldr %f2, %f9
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -4372,9 +4372,9 @@ define <3 x float> @constrained_vector_nearbyint_v3f32(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -4393,9 +4393,9 @@ define <3 x float> @constrained_vector_nearbyint_v3f32(ptr %a) #0 {
; S390X-NEXT: brasl %r14, nearbyintf at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f10
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -4429,9 +4429,9 @@ define void @constrained_vector_nearbyint_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -4449,9 +4449,9 @@ define void @constrained_vector_nearbyint_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -4482,10 +4482,10 @@ define <4 x double> @constrained_vector_nearbyint_v4f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -192
; S390X-NEXT: .cfi_def_cfa_offset 352
-; S390X-NEXT: std %f8, 184(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f11, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 184(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f11, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -4507,10 +4507,10 @@ define <4 x double> @constrained_vector_nearbyint_v4f64(ptr %a) #0 {
; S390X-NEXT: ldr %f2, %f9
; S390X-NEXT: ldr %f4, %f10
; S390X-NEXT: ldr %f6, %f11
-; S390X-NEXT: ld %f8, 184(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f11, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 184(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f11, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 304(%r15)
; S390X-NEXT: br %r14
;
@@ -4577,7 +4577,7 @@ define <2 x double> @constrained_vector_maxnum_v2f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -168
; S390X-NEXT: .cfi_def_cfa_offset 328
-; S390X-NEXT: std %f8, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: larl %r1, .LCPI86_0
; S390X-NEXT: ld %f0, 0(%r1)
@@ -4592,7 +4592,7 @@ define <2 x double> @constrained_vector_maxnum_v2f64() #0 {
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, fmax at PLT
; S390X-NEXT: ldr %f2, %f8
-; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 280(%r15)
; S390X-NEXT: br %r14
;
@@ -4636,9 +4636,9 @@ define <3 x float> @constrained_vector_maxnum_v3f32() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -4662,9 +4662,9 @@ define <3 x float> @constrained_vector_maxnum_v3f32() #0 {
; S390X-NEXT: brasl %r14, fmaxf at PLT
; S390X-NEXT: ler %f2, %f10
; S390X-NEXT: ler %f4, %f9
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -4675,7 +4675,7 @@ define <3 x float> @constrained_vector_maxnum_v3f32() #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: larl %r1, .LCPI87_0
; SZ13-NEXT: lde %f0, 0(%r1)
@@ -4700,7 +4700,7 @@ define <3 x float> @constrained_vector_maxnum_v3f32() #0 {
; SZ13-NEXT: # kill: def $f0s killed $f0s def $v0
; SZ13-NEXT: vmrhf %v0, %v1, %v0
; SZ13-NEXT: vl %v1, 176(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vrepf %v1, %v1, 0
; SZ13-NEXT: vmrhg %v24, %v0, %v1
; SZ13-NEXT: lmg %r14, %r15, 312(%r15)
@@ -4722,9 +4722,9 @@ define void @constrained_vector_log10_maxnum_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -4748,9 +4748,9 @@ define void @constrained_vector_log10_maxnum_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -4762,7 +4762,7 @@ define void @constrained_vector_log10_maxnum_v3f64(ptr %a) #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: larl %r1, .LCPI88_0
; SZ13-NEXT: vl %v0, 0(%r2), 4
@@ -4790,7 +4790,7 @@ define void @constrained_vector_log10_maxnum_v3f64(ptr %a) #0 {
; SZ13-NEXT: brasl %r14, fmax at PLT
; SZ13-NEXT: std %f0, 16(%r13)
; SZ13-NEXT: vl %v0, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vst %v0, 0(%r13), 4
; SZ13-NEXT: lmg %r13, %r15, 304(%r15)
; SZ13-NEXT: br %r14
@@ -4812,9 +4812,9 @@ define <4 x double> @constrained_vector_maxnum_v4f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -4847,9 +4847,9 @@ define <4 x double> @constrained_vector_maxnum_v4f64() #0 {
; S390X-NEXT: ldr %f2, %f10
; S390X-NEXT: ldr %f4, %f9
; S390X-NEXT: ldr %f6, %f8
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -4951,7 +4951,7 @@ define <2 x double> @constrained_vector_minnum_v2f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -168
; S390X-NEXT: .cfi_def_cfa_offset 328
-; S390X-NEXT: std %f8, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: larl %r1, .LCPI91_0
; S390X-NEXT: ld %f0, 0(%r1)
@@ -4966,7 +4966,7 @@ define <2 x double> @constrained_vector_minnum_v2f64() #0 {
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, fmin at PLT
; S390X-NEXT: ldr %f2, %f8
-; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 280(%r15)
; S390X-NEXT: br %r14
;
@@ -5010,9 +5010,9 @@ define <3 x float> @constrained_vector_minnum_v3f32() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -5036,9 +5036,9 @@ define <3 x float> @constrained_vector_minnum_v3f32() #0 {
; S390X-NEXT: brasl %r14, fminf at PLT
; S390X-NEXT: ler %f2, %f10
; S390X-NEXT: ler %f4, %f9
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -5049,7 +5049,7 @@ define <3 x float> @constrained_vector_minnum_v3f32() #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: larl %r1, .LCPI92_0
; SZ13-NEXT: lde %f0, 0(%r1)
@@ -5074,7 +5074,7 @@ define <3 x float> @constrained_vector_minnum_v3f32() #0 {
; SZ13-NEXT: # kill: def $f0s killed $f0s def $v0
; SZ13-NEXT: vmrhf %v0, %v1, %v0
; SZ13-NEXT: vl %v1, 176(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vrepf %v1, %v1, 0
; SZ13-NEXT: vmrhg %v24, %v0, %v1
; SZ13-NEXT: lmg %r14, %r15, 312(%r15)
@@ -5096,10 +5096,10 @@ define void @constrained_vector_minnum_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -192
; S390X-NEXT: .cfi_def_cfa_offset 352
-; S390X-NEXT: std %f8, 184(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f11, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 184(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f11, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -5123,10 +5123,10 @@ define void @constrained_vector_minnum_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f10, 8(%r13)
; S390X-NEXT: std %f11, 16(%r13)
-; S390X-NEXT: ld %f8, 184(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f11, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 184(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f11, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -5138,8 +5138,8 @@ define void @constrained_vector_minnum_v3f64(ptr %a) #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -208
; SZ13-NEXT: .cfi_def_cfa_offset 368
-; SZ13-NEXT: std %f8, 200(%r15) # 8-byte Folded Spill
-; SZ13-NEXT: std %f9, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 200(%r15) # 8-byte Spill
+; SZ13-NEXT: std %f9, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: .cfi_offset %f9, -176
; SZ13-NEXT: larl %r1, .LCPI93_0
@@ -5167,8 +5167,8 @@ define void @constrained_vector_minnum_v3f64(ptr %a) #0 {
; SZ13-NEXT: brasl %r14, fmin at PLT
; SZ13-NEXT: std %f0, 16(%r13)
; SZ13-NEXT: vl %v0, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 200(%r15) # 8-byte Folded Reload
-; SZ13-NEXT: ld %f9, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 200(%r15) # 8-byte Reload
+; SZ13-NEXT: ld %f9, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vst %v0, 0(%r13), 4
; SZ13-NEXT: lmg %r13, %r15, 312(%r15)
; SZ13-NEXT: br %r14
@@ -5190,9 +5190,9 @@ define <4 x double> @constrained_vector_minnum_v4f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -5225,9 +5225,9 @@ define <4 x double> @constrained_vector_minnum_v4f64() #0 {
; S390X-NEXT: ldr %f2, %f10
; S390X-NEXT: ldr %f4, %f9
; S390X-NEXT: ldr %f6, %f8
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -5567,8 +5567,8 @@ define <2 x double> @constrained_vector_ceil_v2f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: ld %f0, 8(%r2)
@@ -5578,8 +5578,8 @@ define <2 x double> @constrained_vector_ceil_v2f64(ptr %a) #0 {
; S390X-NEXT: ldr %f0, %f8
; S390X-NEXT: brasl %r14, ceil at PLT
; S390X-NEXT: ldr %f2, %f9
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -5604,9 +5604,9 @@ define <3 x float> @constrained_vector_ceil_v3f32(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -5625,9 +5625,9 @@ define <3 x float> @constrained_vector_ceil_v3f32(ptr %a) #0 {
; S390X-NEXT: brasl %r14, ceilf at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f10
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -5660,9 +5660,9 @@ define void @constrained_vector_ceil_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -5680,9 +5680,9 @@ define void @constrained_vector_ceil_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -5740,8 +5740,8 @@ define <2 x double> @constrained_vector_floor_v2f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: ld %f0, 8(%r2)
@@ -5751,8 +5751,8 @@ define <2 x double> @constrained_vector_floor_v2f64(ptr %a) #0 {
; S390X-NEXT: ldr %f0, %f8
; S390X-NEXT: brasl %r14, floor at PLT
; S390X-NEXT: ldr %f2, %f9
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -5777,9 +5777,9 @@ define <3 x float> @constrained_vector_floor_v3f32(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -5798,9 +5798,9 @@ define <3 x float> @constrained_vector_floor_v3f32(ptr %a) #0 {
; S390X-NEXT: brasl %r14, floorf at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f10
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -5833,9 +5833,9 @@ define void @constrained_vector_floor_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -5853,9 +5853,9 @@ define void @constrained_vector_floor_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -5912,8 +5912,8 @@ define <2 x double> @constrained_vector_round_v2f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: ld %f0, 8(%r2)
@@ -5923,8 +5923,8 @@ define <2 x double> @constrained_vector_round_v2f64(ptr %a) #0 {
; S390X-NEXT: ldr %f0, %f8
; S390X-NEXT: brasl %r14, round at PLT
; S390X-NEXT: ldr %f2, %f9
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -5949,9 +5949,9 @@ define <3 x float> @constrained_vector_round_v3f32(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -5970,9 +5970,9 @@ define <3 x float> @constrained_vector_round_v3f32(ptr %a) #0 {
; S390X-NEXT: brasl %r14, roundf at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f10
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -6006,9 +6006,9 @@ define void @constrained_vector_round_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -6026,9 +6026,9 @@ define void @constrained_vector_round_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -6085,8 +6085,8 @@ define <2 x double> @constrained_vector_roundeven_v2f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: ld %f0, 8(%r2)
@@ -6096,8 +6096,8 @@ define <2 x double> @constrained_vector_roundeven_v2f64(ptr %a) #0 {
; S390X-NEXT: ldr %f0, %f8
; S390X-NEXT: brasl %r14, roundeven at PLT
; S390X-NEXT: ldr %f2, %f9
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -6122,9 +6122,9 @@ define <3 x float> @constrained_vector_roundeven_v3f32(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -6143,9 +6143,9 @@ define <3 x float> @constrained_vector_roundeven_v3f32(ptr %a) #0 {
; S390X-NEXT: brasl %r14, roundevenf at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f10
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -6178,9 +6178,9 @@ define void @constrained_vector_roundeven_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -6198,9 +6198,9 @@ define void @constrained_vector_roundeven_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -6257,8 +6257,8 @@ define <2 x double> @constrained_vector_trunc_v2f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: ld %f0, 8(%r2)
@@ -6268,8 +6268,8 @@ define <2 x double> @constrained_vector_trunc_v2f64(ptr %a) #0 {
; S390X-NEXT: ldr %f0, %f8
; S390X-NEXT: brasl %r14, trunc at PLT
; S390X-NEXT: ldr %f2, %f9
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -6294,9 +6294,9 @@ define <3 x float> @constrained_vector_trunc_v3f32(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -6315,9 +6315,9 @@ define <3 x float> @constrained_vector_trunc_v3f32(ptr %a) #0 {
; S390X-NEXT: brasl %r14, truncf at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f10
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -6350,9 +6350,9 @@ define void @constrained_vector_trunc_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -6370,9 +6370,9 @@ define void @constrained_vector_trunc_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -6438,7 +6438,7 @@ define <2 x double> @constrained_vector_tan_v2f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -168
; S390X-NEXT: .cfi_def_cfa_offset 328
-; S390X-NEXT: std %f8, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: larl %r1, .LCPI124_0
; S390X-NEXT: ld %f0, 0(%r1)
@@ -6449,7 +6449,7 @@ define <2 x double> @constrained_vector_tan_v2f64() #0 {
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, tan at PLT
; S390X-NEXT: ldr %f2, %f8
-; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 280(%r15)
; S390X-NEXT: br %r14
;
@@ -6489,8 +6489,8 @@ define <3 x float> @constrained_vector_tan_v3f32() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: larl %r1, .LCPI125_0
@@ -6508,8 +6508,8 @@ define <3 x float> @constrained_vector_tan_v3f32() #0 {
; S390X-NEXT: brasl %r14, tanf at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f8
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -6558,9 +6558,9 @@ define void @constrained_vector_tan_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -6578,9 +6578,9 @@ define void @constrained_vector_tan_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -6592,7 +6592,7 @@ define void @constrained_vector_tan_v3f64(ptr %a) #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: vl %v0, 0(%r2), 4
; SZ13-NEXT: ld %f8, 16(%r2)
@@ -6614,7 +6614,7 @@ define void @constrained_vector_tan_v3f64(ptr %a) #0 {
; SZ13-NEXT: brasl %r14, tan at PLT
; SZ13-NEXT: std %f0, 16(%r13)
; SZ13-NEXT: vl %v0, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vst %v0, 0(%r13), 4
; SZ13-NEXT: lmg %r13, %r15, 304(%r15)
; SZ13-NEXT: br %r14
@@ -6636,9 +6636,9 @@ define <4 x double> @constrained_vector_tan_v4f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -6663,9 +6663,9 @@ define <4 x double> @constrained_vector_tan_v4f64() #0 {
; S390X-NEXT: ldr %f2, %f10
; S390X-NEXT: ldr %f4, %f9
; S390X-NEXT: ldr %f6, %f8
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -6760,7 +6760,7 @@ define <2 x double> @constrained_vector_atan2_v2f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -168
; S390X-NEXT: .cfi_def_cfa_offset 328
-; S390X-NEXT: std %f8, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: larl %r1, .LCPI129_0
; S390X-NEXT: ld %f0, 0(%r1)
@@ -6775,7 +6775,7 @@ define <2 x double> @constrained_vector_atan2_v2f64() #0 {
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, atan2 at PLT
; S390X-NEXT: ldr %f2, %f8
-; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 280(%r15)
; S390X-NEXT: br %r14
;
@@ -6820,8 +6820,8 @@ define <3 x float> @constrained_vector_atan2_v3f32() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: larl %r1, .LCPI130_0
@@ -6845,8 +6845,8 @@ define <3 x float> @constrained_vector_atan2_v3f32() #0 {
; S390X-NEXT: brasl %r14, atan2f at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f8
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -6902,11 +6902,11 @@ define void @constrained_vector_atan2_v3f64(ptr %a, ptr %b) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -200
; S390X-NEXT: .cfi_def_cfa_offset 360
-; S390X-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 184(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f11, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f12, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 192(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 184(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f11, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f12, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -6931,11 +6931,11 @@ define void @constrained_vector_atan2_v3f64(ptr %a, ptr %b) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f12, 16(%r13)
-; S390X-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 184(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f11, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f12, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 192(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 184(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f11, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f12, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 304(%r15)
; S390X-NEXT: br %r14
;
@@ -6947,8 +6947,8 @@ define void @constrained_vector_atan2_v3f64(ptr %a, ptr %b) #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -224
; SZ13-NEXT: .cfi_def_cfa_offset 384
-; SZ13-NEXT: std %f8, 216(%r15) # 8-byte Folded Spill
-; SZ13-NEXT: std %f9, 208(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 216(%r15) # 8-byte Spill
+; SZ13-NEXT: std %f9, 208(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: .cfi_offset %f9, -176
; SZ13-NEXT: vl %v0, 0(%r2), 4
@@ -6979,8 +6979,8 @@ define void @constrained_vector_atan2_v3f64(ptr %a, ptr %b) #0 {
; SZ13-NEXT: brasl %r14, atan2 at PLT
; SZ13-NEXT: std %f0, 16(%r13)
; SZ13-NEXT: vl %v0, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 216(%r15) # 8-byte Folded Reload
-; SZ13-NEXT: ld %f9, 208(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 216(%r15) # 8-byte Reload
+; SZ13-NEXT: ld %f9, 208(%r15) # 8-byte Reload
; SZ13-NEXT: vst %v0, 0(%r13), 4
; SZ13-NEXT: lmg %r13, %r15, 328(%r15)
; SZ13-NEXT: br %r14
@@ -7004,9 +7004,9 @@ define <4 x double> @constrained_vector_atan2_v4f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -7039,9 +7039,9 @@ define <4 x double> @constrained_vector_atan2_v4f64() #0 {
; S390X-NEXT: ldr %f2, %f10
; S390X-NEXT: ldr %f4, %f9
; S390X-NEXT: ldr %f6, %f8
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
diff --git a/llvm/test/CodeGen/SystemZ/zos-prologue-epilog.ll b/llvm/test/CodeGen/SystemZ/zos-prologue-epilog.ll
index 40813a7bc18de..428d9bbe64c53 100644
--- a/llvm/test/CodeGen/SystemZ/zos-prologue-epilog.ll
+++ b/llvm/test/CodeGen/SystemZ/zos-prologue-epilog.ll
@@ -87,14 +87,14 @@ define void @func1(ptr %ptr) {
; CHECK-LABEL: func2
; CHECK64: stmg 6,7,1744(4)
; CHECK64: aghi 4,-320
-; CHECK64: std 15,{{[0-9]+}}(4) * 8-byte Folded Spill
-; CHECK64: std 14,{{[0-9]+}}(4) * 8-byte Folded Spill
-; CHECK64: std 13,{{[0-9]+}}(4) * 8-byte Folded Spill
-; CHECK64: std 12,{{[0-9]+}}(4) * 8-byte Folded Spill
-; CHECK64: std 11,{{[0-9]+}}(4) * 8-byte Folded Spill
-; CHECK64: std 10,{{[0-9]+}}(4) * 8-byte Folded Spill
-; CHECK64: std 9,{{[0-9]+}}(4) * 8-byte Folded Spill
-; CHECK64: std 8,{{[0-9]+}}(4) * 8-byte Folded Spill
+; CHECK64: std 15,{{[0-9]+}}(4) * 8-byte Spill
+; CHECK64: std 14,{{[0-9]+}}(4) * 8-byte Spill
+; CHECK64: std 13,{{[0-9]+}}(4) * 8-byte Spill
+; CHECK64: std 12,{{[0-9]+}}(4) * 8-byte Spill
+; CHECK64: std 11,{{[0-9]+}}(4) * 8-byte Spill
+; CHECK64: std 10,{{[0-9]+}}(4) * 8-byte Spill
+; CHECK64: std 9,{{[0-9]+}}(4) * 8-byte Spill
+; CHECK64: std 8,{{[0-9]+}}(4) * 8-byte Spill
; CHECK64: vst 23,{{[0-9]+}}(4),4 * 16-byte Folded Spill
; CHECK64: vst 22,{{[0-9]+}}(4),4 * 16-byte Folded Spill
; CHECK64: vst 21,{{[0-9]+}}(4),4 * 16-byte Folded Spill
@@ -104,14 +104,14 @@ define void @func1(ptr %ptr) {
; CHECK64: vst 17,{{[0-9]+}}(4),4 * 16-byte Folded Spill
; CHECK64: vst 16,{{[0-9]+}}(4),4 * 16-byte Folded Spill
-; CHECK64: ld 15,{{[0-9]+}}(4) * 8-byte Folded Reload
-; CHECK64: ld 14,{{[0-9]+}}(4) * 8-byte Folded Reload
-; CHECK64: ld 13,{{[0-9]+}}(4) * 8-byte Folded Reload
-; CHECK64: ld 12,{{[0-9]+}}(4) * 8-byte Folded Reload
-; CHECK64: ld 11,{{[0-9]+}}(4) * 8-byte Folded Reload
-; CHECK64: ld 10,{{[0-9]+}}(4) * 8-byte Folded Reload
-; CHECK64: ld 9,{{[0-9]+}}(4) * 8-byte Folded Reload
-; CHECK64: ld 8,{{[0-9]+}}(4) * 8-byte Folded Reload
+; CHECK64: ld 15,{{[0-9]+}}(4) * 8-byte Reload
+; CHECK64: ld 14,{{[0-9]+}}(4) * 8-byte Reload
+; CHECK64: ld 13,{{[0-9]+}}(4) * 8-byte Reload
+; CHECK64: ld 12,{{[0-9]+}}(4) * 8-byte Reload
+; CHECK64: ld 11,{{[0-9]+}}(4) * 8-byte Reload
+; CHECK64: ld 10,{{[0-9]+}}(4) * 8-byte Reload
+; CHECK64: ld 9,{{[0-9]+}}(4) * 8-byte Reload
+; CHECK64: ld 8,{{[0-9]+}}(4) * 8-byte Reload
; CHECK64: vl 23,{{[0-9]+}}(4),4 * 16-byte Folded Reload
; CHECK64: vl 22,{{[0-9]+}}(4),4 * 16-byte Folded Reload
; CHECK64: vl 21,{{[0-9]+}}(4),4 * 16-byte Folded Reload
>From 06a128f1f2e0a5fe0be7949fcb5e1c91ae9ef43b Mon Sep 17 00:00:00 2001
From: Dominik Steenken <dost at de.ibm.com>
Date: Mon, 3 Feb 2025 13:20:31 +0100
Subject: [PATCH 3/7] [SystemZ] SystemZDebugInstrInfo 3/6 Adapt the
SystemZCopyPhysRegs pass This commit adapts the systemz-copy-physregs pass
such that it preserves debug information attached to instructions it replaces.
Specifically, in case a copy targets an access register, we redirect the copy
via an auxiliary register. This leads to the final result being written by a
newly inserted SAR instruction, rather than the original MI, so we need to
update the debug value tracking to account for this. In addition, this commit
adds a previously missing initialization of the pass that was preventing the
pass from being run by llc via --run-pass.
---
.../Target/SystemZ/SystemZCopyPhysRegs.cpp | 5 ++++-
.../Target/SystemZ/SystemZTargetMachine.cpp | 1 +
.../SystemZ/debug-instrref-copyphysregs.mir | 22 +++++++++++++++++++
3 files changed, 27 insertions(+), 1 deletion(-)
create mode 100644 llvm/test/CodeGen/SystemZ/debug-instrref-copyphysregs.mir
diff --git a/llvm/lib/Target/SystemZ/SystemZCopyPhysRegs.cpp b/llvm/lib/Target/SystemZ/SystemZCopyPhysRegs.cpp
index 8979ce4386607..fe9ffd2d704b8 100644
--- a/llvm/lib/Target/SystemZ/SystemZCopyPhysRegs.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZCopyPhysRegs.cpp
@@ -75,6 +75,7 @@ bool SystemZCopyPhysRegs::visitMBB(MachineBasicBlock &MBB) {
DebugLoc DL = MI->getDebugLoc();
Register SrcReg = MI->getOperand(1).getReg();
Register DstReg = MI->getOperand(0).getReg();
+
if (DstReg.isVirtual() &&
(SrcReg == SystemZ::CC || SystemZ::AR32BitRegClass.contains(SrcReg))) {
Register Tmp = MRI->createVirtualRegister(&SystemZ::GR32BitRegClass);
@@ -89,7 +90,9 @@ bool SystemZCopyPhysRegs::visitMBB(MachineBasicBlock &MBB) {
SystemZ::AR32BitRegClass.contains(DstReg)) {
Register Tmp = MRI->createVirtualRegister(&SystemZ::GR32BitRegClass);
MI->getOperand(0).setReg(Tmp);
- BuildMI(MBB, MBBI, DL, TII->get(SystemZ::SAR), DstReg).addReg(Tmp);
+ MachineInstr* NMI = BuildMI(MBB, MBBI, DL, TII->get(SystemZ::SAR), DstReg).addReg(Tmp);
+ // SAR now writes the final value to DstReg, so update debug values.
+ MBB.getParent()->substituteDebugValuesForInst(*MI, *NMI);
Modified = true;
}
}
diff --git a/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp b/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
index 092515ee197a8..6300fb72990d0 100644
--- a/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
@@ -48,6 +48,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeSystemZTarget() {
initializeSystemZPostRewritePass(PR);
initializeSystemZTDCPassPass(PR);
initializeSystemZDAGToDAGISelLegacyPass(PR);
+ initializeSystemZCopyPhysRegsPass(PR);
}
static std::string computeDataLayout(const Triple &TT) {
diff --git a/llvm/test/CodeGen/SystemZ/debug-instrref-copyphysregs.mir b/llvm/test/CodeGen/SystemZ/debug-instrref-copyphysregs.mir
new file mode 100644
index 0000000000000..ef0c4810731d6
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/debug-instrref-copyphysregs.mir
@@ -0,0 +1,22 @@
+# Check that the backend properly tracks debug-instr-references across the
+# copy-physregs pass.
+#
+# RUN: llc %s -mtriple=s390x-linux-gnu -run-pass=systemz-copy-physregs \
+# RUN: -o - 2>&1 | FileCheck %s
+
+# COPY 1: Copy VirtReg to AR
+# COPY 2: Copy AR to VirtReg
+# COPY 3: Copy CC to VirtReg
+# CHECK: name: foo
+# CHECK: debugValueSubstitutions:
+# these are the correct substitutions
+# CHECK-NEXT: - { srcinst: 1, srcop: 0, dstinst: 4, dstop: 0, subreg: 0 }
+# we also need to make sure that these are the only substitutions
+# CHECK-NEXT: constants: []
+name: foo
+body: |
+ bb.0:
+ liveins: $a1
+ COPY def $a1, %1:gr32bit, debug-instr-number 1
+ COPY def %2:gr32bit, $a1, debug-instr-number 2
+ COPY def %3:gr32bit, $cc, debug-instr-number 3
>From 088f21110ff2a6de2c8d72dfad8514d62eac4e0f Mon Sep 17 00:00:00 2001
From: Dominik Steenken <dost at de.ibm.com>
Date: Mon, 3 Feb 2025 13:52:23 +0100
Subject: [PATCH 4/7] [SystemZ] SystemZDebugInstrInfo 4/6 Adapt
SystemZLongBranch pass This commit adapts the systemz-long-branch pass such
that it preserves debug information attached to the instructions it replaces.
In this pass, only branch-and-count instructions qualify, since they are the
only ones that modify a tracked register.
---
llvm/lib/Target/SystemZ/SystemZLongBranch.cpp | 13 +++++---
.../SystemZ/Large/debug-instrref-brct.py | 33 +++++++++++++++++++
2 files changed, 42 insertions(+), 4 deletions(-)
create mode 100644 llvm/test/CodeGen/SystemZ/Large/debug-instrref-brct.py
diff --git a/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp b/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp
index 36d76235398ed..f19b932f3c731 100644
--- a/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp
@@ -374,16 +374,19 @@ void SystemZLongBranch::splitBranchOnCount(MachineInstr *MI,
unsigned AddOpcode) {
MachineBasicBlock *MBB = MI->getParent();
DebugLoc DL = MI->getDebugLoc();
- BuildMI(*MBB, MI, DL, TII->get(AddOpcode))
- .add(MI->getOperand(0))
- .add(MI->getOperand(1))
- .addImm(-1);
+ MachineInstr *AddImm = BuildMI(*MBB, MI, DL, TII->get(AddOpcode))
+ .add(MI->getOperand(0))
+ .add(MI->getOperand(1))
+ .addImm(-1);
MachineInstr *BRCL = BuildMI(*MBB, MI, DL, TII->get(SystemZ::BRCL))
.addImm(SystemZ::CCMASK_ICMP)
.addImm(SystemZ::CCMASK_CMP_NE)
.add(MI->getOperand(2));
// The implicit use of CC is a killing use.
BRCL->addRegisterKilled(SystemZ::CC, &TII->getRegisterInfo());
+ // The result of the BRANCH ON COUNT MI is the new count in register 0, so the
+ // debug tracking needs to go to the result of the Add immediate.
+ MBB->getParent()->substituteDebugValuesForInst(*MI, *AddImm);
MI->eraseFromParent();
}
@@ -402,6 +405,8 @@ void SystemZLongBranch::splitCompareBranch(MachineInstr *MI,
.add(MI->getOperand(3));
// The implicit use of CC is a killing use.
BRCL->addRegisterKilled(SystemZ::CC, &TII->getRegisterInfo());
+ // Since we are replacing branches that did not compute any value, no debug
+ // value substitution is necessary.
MI->eraseFromParent();
}
diff --git a/llvm/test/CodeGen/SystemZ/Large/debug-instrref-brct.py b/llvm/test/CodeGen/SystemZ/Large/debug-instrref-brct.py
new file mode 100644
index 0000000000000..05593a672bf05
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/Large/debug-instrref-brct.py
@@ -0,0 +1,33 @@
+# RUN: %python %s | llc -mtriple=s390x-linux-gnu -x mir --run-pass=systemz-long-branch \
+# RUN: | FileCheck %s
+
+# CHECK: debugValueSubstitutions:
+# CHECK: - { srcinst: 1, srcop: 0, dstinst: 3, dstop: 0, subreg: 0 }
+# CHECK: - { srcinst: 1, srcop: 3, dstinst: 3, dstop: 3, subreg: 0 }
+# CHECK-NEXT: constants: []
+# CHECK: $r3l = AHI $r3l, -1
+# CHECK-NEXT: BRCL 14, 6, %bb.2
+print(" name: main")
+print(" alignment: 16")
+print(" tracksRegLiveness: true")
+print(" liveins: ")
+print(" - { reg: '$r1d', virtual-reg: '' }")
+print(" - { reg: '$r2d', virtual-reg: '' }")
+print(" - { reg: '$r3l', virtual-reg: '' }")
+print(" - { reg: '$r4l', virtual-reg: '' }")
+print(" debugValueSubstitutions: []")
+print(" body: |")
+print(" bb.0:")
+print(" liveins: $r3l, $r4l, $r2d, $r3d")
+print(" $r3l = BRCT $r3l, %bb.2, implicit-def $cc, debug-instr-number 1")
+print(" J %bb.1, debug-instr-number 2")
+print(" bb.1:")
+print(" liveins: $r1d, $r2d")
+for i in range(0, 8192):
+ print(" $r1d = LGR $r2d")
+ print(" $r2d = LGR $r1d")
+print(" Return implicit $r2d")
+print(" bb.2:")
+print(" liveins: $r4l")
+print(" Return implicit $r4l")
+
>From 4bc3e365ecc2d14485c2f2d50852b479d7d08948 Mon Sep 17 00:00:00 2001
From: Dominik Steenken <dost at de.ibm.com>
Date: Wed, 19 Feb 2025 10:22:08 +0100
Subject: [PATCH 5/7] [SystemZ] SystemZDebugInstrInfo 5/6 Adapt
SystemZPostRewrite Pass This commit adapts the systemz-post-rewrite pass such
that it preserves debug information attached to the instructions it replaces.
Specifically, this pass replaces `LOCR` and `SELR` pseudoinstructions with
either the real versions of those instructions, of with branching programs
that implement the intent of the Pseudo. In all these cases, any
`debug-instr-number` attached to the pseudo needs to be reallocated to the
appropriate instruction in the result.
---
.../lib/Target/SystemZ/SystemZPostRewrite.cpp | 18 ++++++++++----
.../SystemZ/debug-instrref-postrewrite.mir | 24 +++++++++++++++++++
2 files changed, 38 insertions(+), 4 deletions(-)
create mode 100644 llvm/test/CodeGen/SystemZ/debug-instrref-postrewrite.mir
diff --git a/llvm/lib/Target/SystemZ/SystemZPostRewrite.cpp b/llvm/lib/Target/SystemZ/SystemZPostRewrite.cpp
index 4b16bcf95d51c..d31fb77c41e9c 100644
--- a/llvm/lib/Target/SystemZ/SystemZPostRewrite.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZPostRewrite.cpp
@@ -19,6 +19,7 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
using namespace llvm;
@@ -108,15 +109,19 @@ void SystemZPostRewrite::selectSELRMux(MachineBasicBlock &MBB,
bool DestIsHigh = SystemZ::isHighReg(DestReg);
bool Src1IsHigh = SystemZ::isHighReg(Src1Reg);
bool Src2IsHigh = SystemZ::isHighReg(Src2Reg);
+ // A copy instruction that we might create, held here for the purpose of
+ // debug instr value tracking.
+ MachineInstr* CopyInst = nullptr;
// In rare cases both sources are the same register (after
// machine-cse). This must be handled as it may lead to wrong-code (after
// machine-cp) if the kill flag on Src1 isn't cleared (with
// expandCondMove()).
if (Src1Reg == Src2Reg) {
- BuildMI(*MBBI->getParent(), MBBI, MBBI->getDebugLoc(),
+ CopyInst = BuildMI(*MBBI->getParent(), MBBI, MBBI->getDebugLoc(),
TII->get(SystemZ::COPY), DestReg)
.addReg(Src1Reg, getRegState(Src1MO) & getRegState(Src2MO));
+ MBB.getParent()->substituteDebugValuesForInst(*MBBI, *CopyInst, 1);
MBBI->eraseFromParent();
return;
}
@@ -126,14 +131,14 @@ void SystemZPostRewrite::selectSELRMux(MachineBasicBlock &MBB,
// first. But only if this doesn't clobber the other source.
if (DestReg != Src1Reg && DestReg != Src2Reg) {
if (DestIsHigh != Src1IsHigh) {
- BuildMI(*MBBI->getParent(), MBBI, MBBI->getDebugLoc(),
+ CopyInst = BuildMI(*MBBI->getParent(), MBBI, MBBI->getDebugLoc(),
TII->get(SystemZ::COPY), DestReg)
.addReg(Src1Reg, getRegState(Src1MO));
Src1MO.setReg(DestReg);
Src1Reg = DestReg;
Src1IsHigh = DestIsHigh;
} else if (DestIsHigh != Src2IsHigh) {
- BuildMI(*MBBI->getParent(), MBBI, MBBI->getDebugLoc(),
+ CopyInst = BuildMI(*MBBI->getParent(), MBBI, MBBI->getDebugLoc(),
TII->get(SystemZ::COPY), DestReg)
.addReg(Src2Reg, getRegState(Src2MO));
Src2MO.setReg(DestReg);
@@ -141,6 +146,9 @@ void SystemZPostRewrite::selectSELRMux(MachineBasicBlock &MBB,
Src2IsHigh = DestIsHigh;
}
}
+ // if a copy instruction was inserted, record the debug value substitution
+ if (CopyInst)
+ MBB.getParent()->substituteDebugValuesForInst(*MBBI, *CopyInst, 1);
// If the destination (now) matches one source, prefer this to be first.
if (DestReg != Src1Reg && DestReg == Src2Reg) {
@@ -204,8 +212,10 @@ bool SystemZPostRewrite::expandCondMove(MachineBasicBlock &MBB,
// In MoveMBB, emit an instruction to move SrcReg into DestReg,
// then fall through to RestMBB.
- BuildMI(*MoveMBB, MoveMBB->end(), DL, TII->get(SystemZ::COPY), DestReg)
+ MachineInstr* CopyInst = BuildMI(*MoveMBB, MoveMBB->end(), DL, TII->get(SystemZ::COPY), DestReg)
.addReg(MI.getOperand(2).getReg(), getRegState(MI.getOperand(2)));
+ // record the debug value substitution for CopyInst
+ MBB.getParent()->substituteDebugValuesForInst(*MBBI, *CopyInst, 1);
MoveMBB->addSuccessor(RestMBB);
NextMBBI = MBB.end();
diff --git a/llvm/test/CodeGen/SystemZ/debug-instrref-postrewrite.mir b/llvm/test/CodeGen/SystemZ/debug-instrref-postrewrite.mir
new file mode 100644
index 0000000000000..a0bb2c1b9ed83
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/debug-instrref-postrewrite.mir
@@ -0,0 +1,24 @@
+# Check that the backend properly tracks debug-instr-references across the
+# post-rewrite pass.
+#
+# RUN: llc %s -mtriple=s390x-linux-gnu -run-pass=systemz-post-rewrite \
+# RUN: -o - 2>&1 | FileCheck %s
+
+# SELRMux 1: simple replace with copy
+# SELRMux 2: simple mutation into selfhr
+# SELRMux 3: replace with if-then-else without prior copy
+# SELRMux 4: replace with if-then-else with prior copy
+# CHECK: name: foo
+# CHECK: debugValueSubstitutions:
+# CHECK-NEXT: - { srcinst: 1, srcop: 0, dstinst: 5, dstop: 0, subreg: 0 }
+# CHECK-NEXT: - { srcinst: 3, srcop: 0, dstinst: 6, dstop: 0, subreg: 0 }
+# CHECK-NEXT: - { srcinst: 4, srcop: 0, dstinst: 7, dstop: 0, subreg: 0 }
+# CHECK-NEXT: - { srcinst: 4, srcop: 0, dstinst: 8, dstop: 0, subreg: 0 }
+name: foo
+body: |
+ bb.0:
+ liveins: $r2h, $r3h, $r2l, $r3l, $cc
+ SELRMux def $r2h, renamable $r3l, renamable $r3l, 1, 2, implicit $cc, debug-instr-number 1
+ SELRMux def $r1h, renamable $r2h, renamable $r3h, 1, 2, implicit $cc, debug-instr-number 2
+ SELRMux def $r2h, renamable $r2h, renamable $r3l, 1, 2, implicit $cc, debug-instr-number 3
+ SELRMux def $r1h, renamable $r2l, renamable $r3l, 1, 2, implicit $cc, debug-instr-number 4
>From 20a1c5b2eb2ed38f4a44cad9889093d5feec88b2 Mon Sep 17 00:00:00 2001
From: Dominik Steenken <dost at de.ibm.com>
Date: Fri, 21 Feb 2025 14:53:44 +0100
Subject: [PATCH 6/7] [SystemZ] SystemZDebugInstrInfo 6/6 Adapt
SystemZElimCompare Pass This commit adapts the systemz-elim-compare pass such
that it preserves debug information attached to the instructions it replaces.
For this pass, only a few substitutions are necessary, since it mainly deals
with conditional branch instructions. The only exceptions are
branch-on-count, which modifies a counter as part of the instruction, as well
as any of the load instructions that are affected.
---
.../lib/Target/SystemZ/SystemZElimCompare.cpp | 9 +++
.../SystemZ/debug-instrref-elimcompare.mir | 65 +++++++++++++++++++
2 files changed, 74 insertions(+)
create mode 100644 llvm/test/CodeGen/SystemZ/debug-instrref-elimcompare.mir
diff --git a/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp b/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp
index 9f4d4aaa68fa3..789365fb9e311 100644
--- a/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp
@@ -227,6 +227,9 @@ bool SystemZElimCompare::convertToBRCT(
// this is not necessary there.
if (BRCT != SystemZ::BRCTH)
MIB.addReg(SystemZ::CC, RegState::ImplicitDefine | RegState::Dead);
+ // The debug instr tracking for the counter now used by BRCT needs to be
+ // updated.
+ MI.getParent()->getParent()->substituteDebugValuesForInst(MI, *MIB);
MI.eraseFromParent();
return true;
}
@@ -268,6 +271,9 @@ bool SystemZElimCompare::convertToLoadAndTrap(
.add(MI.getOperand(1))
.add(MI.getOperand(2))
.add(MI.getOperand(3));
+ // The debug instr tracking for the load target now used by the load-and-trap
+ // needs to be updated.
+ MI.getParent()->getParent()->substituteDebugValuesForInst(MI, *Branch);
MI.eraseFromParent();
return true;
}
@@ -288,6 +294,9 @@ bool SystemZElimCompare::convertToLoadAndTest(
for (const auto &MO : MI.operands())
MIB.add(MO);
MIB.setMemRefs(MI.memoperands());
+ // The debug instr tracking for the load target now needs to be updated
+ // because the load has moved to a new instruction
+ MI.getParent()->getParent()->substituteDebugValuesForInst(MI, *MIB);
MI.eraseFromParent();
// Mark instruction as not raising an FP exception if applicable. We already
diff --git a/llvm/test/CodeGen/SystemZ/debug-instrref-elimcompare.mir b/llvm/test/CodeGen/SystemZ/debug-instrref-elimcompare.mir
new file mode 100644
index 0000000000000..9382b7ad18fca
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/debug-instrref-elimcompare.mir
@@ -0,0 +1,65 @@
+# Check that the backend properly tracks debug-instr-references across the
+# elim-compare pass.
+#
+# RUN: llc %s -mtriple=s390x-linux-gnu -mcpu=z14 -run-pass=systemz-elim-compare \
+# RUN: -o - 2>&1 | FileCheck %s
+
+# bb.0 - elimination of CHI, modification of BRC, no substitutions
+# bb.1 - elimination of CHI, replacement of LR with LTR, one substitution
+# bb.2 - elimination of L and CHI, modification of CondTrap into LAT, one substitution
+# bb.3 - elimination of CHI, replacement of L with a load-and-test, one substitution
+# CHECK: name: foo
+# CHECK: debugValueSubstitutions:
+# these are the correct substitutions
+# CHECK-NEXT: - { srcinst: 5, srcop: 0, dstinst: 13, dstop: 0, subreg: 0 }
+# CHECK-NEXT: - { srcinst: 7, srcop: 0, dstinst: 9, dstop: 0, subreg: 0 }
+# CHECK-NEXT: - { srcinst: 10, srcop: 0, dstinst: 14, dstop: 0, subreg: 0 }
+# we also need to make sure that these are the only substitutions
+# CHECK-NEXT: constants: []
+---
+name: foo
+tracksRegLiveness: true
+liveins:
+ - { reg: '$r2l', virtual-reg: '' }
+ - { reg: '$r3l', virtual-reg: '' }
+ - { reg: '$r4l', virtual-reg: '' }
+ - { reg: '$r5d', virtual-reg: '' }
+debugValueSubstitutions: []
+body: |
+ bb.0:
+ successors: %bb.1(0x80000000)
+ liveins: $r2l, $r3l, $r4l, $r5d
+
+ renamable $r3l = nsw AR killed renamable $r3l, renamable $r2l, implicit-def dead $cc, debug-instr-number 1
+ CHI renamable $r3l, 0, implicit-def $cc, debug-instr-number 2
+ BRC 14, 12, %bb.1, implicit $cc, debug-instr-number 3
+
+ bb.1:
+ successors: %bb.2(0x80000000)
+ liveins: $r2l, $r3l, $r4l, $r5d
+
+ CHI renamable $r2l, 0, implicit-def $cc, debug-instr-number 4
+ renamable $r3l = LR renamable $r2l, debug-instr-number 5
+ BRC 14, 8, %bb.2, implicit killed $cc, debug-instr-number 6
+
+ bb.2:
+ successors: %bb.3(0x80000000)
+ liveins: $r2l, $r3l, $r4l, $r5d
+
+ renamable $r2l = L killed renamable $r5d, 0, $noreg, debug-instr-number 7
+ CHI renamable $r2l, 0, implicit-def $cc, debug-instr-number 8
+ CondTrap 14, 8, implicit killed $cc, debug-instr-number 9
+ J %bb.3
+
+ bb.3:
+    successors: %bb.4(0x80000000)
+ liveins: $r2l, $r3l, $r4l, $r5d
+
+ renamable $r3l = L renamable $r5d, 0, $noreg, debug-instr-number 10
+ CHI renamable $r3l, 0, implicit-def $cc, debug-instr-number 11
+ BRC 14, 8, %bb.4, implicit killed $cc, debug-instr-number 12
+
+ bb.4:
+ $r2l = LHI 2
+ Return implicit $r2l
+
+...
>From b51aab61076fef125983d1188e178058949d6513 Mon Sep 17 00:00:00 2001
From: Dominik Steenken <dost at de.ibm.com>
Date: Thu, 20 Mar 2025 14:01:36 +0100
Subject: [PATCH 7/7] clang-format and cleanup
---
.../Target/SystemZ/SystemZCopyPhysRegs.cpp | 3 +-
llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp | 28 ++++---------------
llvm/lib/Target/SystemZ/SystemZInstrInfo.h | 7 +++--
.../lib/Target/SystemZ/SystemZPostRewrite.cpp | 19 +++++++------
.../SystemZ/Large/debug-instrref-brct.py | 1 -
5 files changed, 22 insertions(+), 36 deletions(-)
diff --git a/llvm/lib/Target/SystemZ/SystemZCopyPhysRegs.cpp b/llvm/lib/Target/SystemZ/SystemZCopyPhysRegs.cpp
index fe9ffd2d704b8..a6cf0f57aaf06 100644
--- a/llvm/lib/Target/SystemZ/SystemZCopyPhysRegs.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZCopyPhysRegs.cpp
@@ -90,7 +90,8 @@ bool SystemZCopyPhysRegs::visitMBB(MachineBasicBlock &MBB) {
SystemZ::AR32BitRegClass.contains(DstReg)) {
Register Tmp = MRI->createVirtualRegister(&SystemZ::GR32BitRegClass);
MI->getOperand(0).setReg(Tmp);
- MachineInstr* NMI = BuildMI(MBB, MBBI, DL, TII->get(SystemZ::SAR), DstReg).addReg(Tmp);
+ MachineInstr *NMI =
+ BuildMI(MBB, MBBI, DL, TII->get(SystemZ::SAR), DstReg).addReg(Tmp);
// SAR now writes the final value to DstReg, so update debug values.
MBB.getParent()->substituteDebugValuesForInst(*MI, *NMI);
Modified = true;
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
index c47a101ad5a60..5c1f3709612dd 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -39,7 +39,6 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
-//#include "SystemZRegisterInfo.h"
#include <cassert>
#include <cstdint>
#include <iterator>
@@ -349,7 +348,8 @@ Register SystemZInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
Register SystemZInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
int &FrameIndex) const {
- // if this is not a simple load from memory, it's not a load from stack slot either.
+ // if this is not a simple load from memory, it's not a load from stack slot
+ // either.
const MCInstrDesc &MCID = MI.getDesc();
if (!(MCID.TSFlags & SystemZII::SimpleBDXLoad))
return 0;
@@ -371,7 +371,8 @@ Register SystemZInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
Register SystemZInstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
int &FrameIndex) const {
- // if this is not a simple store to memory, it's not a store to stack slot either.
+ // if this is not a simple store to memory, it's not a store to stack slot
+ // either.
const MCInstrDesc &MCID = MI.getDesc();
if (!(MCID.TSFlags & SystemZII::SimpleBDXStore))
return 0;
@@ -2362,27 +2363,10 @@ bool SystemZInstrInfo::getConstValDefinedInReg(const MachineInstr &MI,
return false;
}
-std::optional<DestSourcePair> SystemZInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
+std::optional<DestSourcePair>
+SystemZInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
// if MI is a simple single-register copy operation, return operand pair
if (MI.isMoveReg())
return DestSourcePair(MI.getOperand(0), MI.getOperand(1));
- // more complicated cases might be handled here.
- /*
- switch (MI.getOpcode()) {
- // VMHRG would work but then how can i turn a register into a MachineOperand?
- case (SystemZ::VMRHG): {
- MCRegister OpReg1 = MI.getOperand(1).getReg();
- MCRegister OpReg2 = MI.getOperand(2).getReg();
- // Check if OpReg1 and OpReg2 together can form an FP128 register.
- MCRegister FirstReg = (OpReg1.id() < OpReg2.id()) ? OpReg1 : OpReg2;
- MCRegister SecondReg = (OpReg1.id() < OpReg2.id()) ? OpReg2 : OpReg1;
- MCRegister FP1 = RI.getMatchingSuperReg(FirstReg, SystemZ::subreg_h64, &SystemZ::FP128BitRegClass);
- MCRegister FP2 = RI.getMatchingSuperReg(SecondReg, SystemZ::subreg_h64, &SystemZ::FP128BitRegClass);
- if (FP1 != FP2)
- return std::nullopt;
- return DestSourcePair(MI.getOperand(0), MachineOperand(FP1));
- }
- }
- */
return std::nullopt;
}
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.h b/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
index 5d972a504be54..a8d282dd9e417 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
@@ -233,9 +233,9 @@ class SystemZInstrInfo : public SystemZGenInstrInfo {
Register isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
Register isLoadFromStackSlotPostFE(const MachineInstr &MI,
- int &FrameIndex) const override;
+ int &FrameIndex) const override;
Register isStoreToStackSlotPostFE(const MachineInstr &MI,
- int &FrameIndex) const override;
+ int &FrameIndex) const override;
bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex,
int &SrcFrameIndex) const override;
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
@@ -391,7 +391,8 @@ class SystemZInstrInfo : public SystemZGenInstrInfo {
bool getConstValDefinedInReg(const MachineInstr &MI, const Register Reg,
int64_t &ImmVal) const override;
- std::optional<DestSourcePair> isCopyInstrImpl(const MachineInstr &MI) const override;
+ std::optional<DestSourcePair>
+ isCopyInstrImpl(const MachineInstr &MI) const override;
};
} // end namespace llvm
diff --git a/llvm/lib/Target/SystemZ/SystemZPostRewrite.cpp b/llvm/lib/Target/SystemZ/SystemZPostRewrite.cpp
index d31fb77c41e9c..ffeba87795625 100644
--- a/llvm/lib/Target/SystemZ/SystemZPostRewrite.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZPostRewrite.cpp
@@ -111,7 +111,7 @@ void SystemZPostRewrite::selectSELRMux(MachineBasicBlock &MBB,
bool Src2IsHigh = SystemZ::isHighReg(Src2Reg);
// A copy instruction that we might create, held here for the purpose of
// debug instr value tracking.
- MachineInstr* CopyInst = nullptr;
+ MachineInstr *CopyInst = nullptr;
// In rare cases both sources are the same register (after
// machine-cse). This must be handled as it may lead to wrong-code (after
@@ -119,8 +119,8 @@ void SystemZPostRewrite::selectSELRMux(MachineBasicBlock &MBB,
// expandCondMove()).
if (Src1Reg == Src2Reg) {
CopyInst = BuildMI(*MBBI->getParent(), MBBI, MBBI->getDebugLoc(),
- TII->get(SystemZ::COPY), DestReg)
- .addReg(Src1Reg, getRegState(Src1MO) & getRegState(Src2MO));
+ TII->get(SystemZ::COPY), DestReg)
+ .addReg(Src1Reg, getRegState(Src1MO) & getRegState(Src2MO));
MBB.getParent()->substituteDebugValuesForInst(*MBBI, *CopyInst, 1);
MBBI->eraseFromParent();
return;
@@ -132,15 +132,15 @@ void SystemZPostRewrite::selectSELRMux(MachineBasicBlock &MBB,
if (DestReg != Src1Reg && DestReg != Src2Reg) {
if (DestIsHigh != Src1IsHigh) {
CopyInst = BuildMI(*MBBI->getParent(), MBBI, MBBI->getDebugLoc(),
- TII->get(SystemZ::COPY), DestReg)
- .addReg(Src1Reg, getRegState(Src1MO));
+ TII->get(SystemZ::COPY), DestReg)
+ .addReg(Src1Reg, getRegState(Src1MO));
Src1MO.setReg(DestReg);
Src1Reg = DestReg;
Src1IsHigh = DestIsHigh;
} else if (DestIsHigh != Src2IsHigh) {
CopyInst = BuildMI(*MBBI->getParent(), MBBI, MBBI->getDebugLoc(),
- TII->get(SystemZ::COPY), DestReg)
- .addReg(Src2Reg, getRegState(Src2MO));
+ TII->get(SystemZ::COPY), DestReg)
+ .addReg(Src2Reg, getRegState(Src2MO));
Src2MO.setReg(DestReg);
Src2Reg = DestReg;
Src2IsHigh = DestIsHigh;
@@ -212,8 +212,9 @@ bool SystemZPostRewrite::expandCondMove(MachineBasicBlock &MBB,
// In MoveMBB, emit an instruction to move SrcReg into DestReg,
// then fall through to RestMBB.
- MachineInstr* CopyInst = BuildMI(*MoveMBB, MoveMBB->end(), DL, TII->get(SystemZ::COPY), DestReg)
- .addReg(MI.getOperand(2).getReg(), getRegState(MI.getOperand(2)));
+ MachineInstr *CopyInst =
+ BuildMI(*MoveMBB, MoveMBB->end(), DL, TII->get(SystemZ::COPY), DestReg)
+ .addReg(MI.getOperand(2).getReg(), getRegState(MI.getOperand(2)));
// record the debug value substitution for CopyInst
MBB.getParent()->substituteDebugValuesForInst(*MBBI, *CopyInst, 1);
MoveMBB->addSuccessor(RestMBB);
diff --git a/llvm/test/CodeGen/SystemZ/Large/debug-instrref-brct.py b/llvm/test/CodeGen/SystemZ/Large/debug-instrref-brct.py
index 05593a672bf05..36c3836bfca59 100644
--- a/llvm/test/CodeGen/SystemZ/Large/debug-instrref-brct.py
+++ b/llvm/test/CodeGen/SystemZ/Large/debug-instrref-brct.py
@@ -30,4 +30,3 @@
print(" bb.2:")
print(" liveins: $r4l")
print(" Return implicit $r4l")
-
More information about the llvm-commits
mailing list