[llvm] [SystemZ] Add `is(LoadFrom|StoreTo)StackSlotPostFE` to SystemZBackend (PR #132928)
Dominik Steenken via llvm-commits
llvm-commits at lists.llvm.org
Tue Mar 25 05:50:42 PDT 2025
https://github.com/dominik-steenken created https://github.com/llvm/llvm-project/pull/132928
As part of an effort to enable instr-ref-based debug value tracking, this PR implements `SystemZInstrInfo::isLoadFromStackSlotPostFE`, as well as `SystemZInstrInfo::isStoreToStackSlotPostFE`. The implementation relies upon the presence of MachineMemoryOperands on the relevant `MachineInstr`s in order to access the `FrameIndex` post frame index elimination.
Since these new functions are only meant to be called after frame-index elimination, they assert against the presence of a frame index on the base register operand of the instruction.
Outside of the utility of these functions to enable instr-ref-based debug value tracking, they also change the behavior of the AsmPrinter, since it will now be able to properly detect non-folded spills and reloads, so this changes a number of tests that were checking specifically for folded reloads.
Note that there are some tests that still check for `vst` and `vl` as folded spills/reloads even though they should be straight reloads. This will be addressed in a future PR.
>From 78bdca95e18da77c6334bd058e7da0f4988bb040 Mon Sep 17 00:00:00 2001
From: Dominik Steenken <dominik.steenken at gmail.com>
Date: Tue, 21 Jan 2025 10:38:02 +0100
Subject: [PATCH] [SystemZ] Add is[LoadFrom|StoreTo]StackSlotPostFE to
SystemZBackend As part of an effort to enable instr-ref-based debug value
tracking, this commit implements
`SystemZInstrInfo::isLoadFromStackSlotPostFE`, as well as
`SystemZInstrInfo::isStoreToStackSlotPostFE`. The implementation relies upon
the presence of MachineMemoryOperands on the relevant `MachineInstr`s in
order to access the `FrameIndex` post frame index elimination.
Since these new functions are only meant to be called after frame-index
elimination, they assert against the presence of a frame index on the
base register operand of the instruction.
Outside of the utility of these functions to enable instr-ref-based debug value
tracking, they also change the behavior of the AsmPrinter, since it will now
be able to properly detect non-folded spills and reloads, so this changes a
number of tests that were checking specifically for folded reloads.
Note that there are some tests that still check for `vst` and `vl` as folded
spills/reloads even though they should be straight reloads. This will be
addressed in a future commit.
---
llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp | 46 +
llvm/lib/Target/SystemZ/SystemZInstrInfo.h | 4 +
.../CodeGen/SystemZ/builtin-setjmp-alloca.ll | 64 +-
.../CodeGen/SystemZ/builtin-setjmp-spills.ll | 112 +--
llvm/test/CodeGen/SystemZ/builtin-setjmp.ll | 64 +-
.../CodeGen/SystemZ/fmuladd-soft-float.ll | 14 +-
.../test/CodeGen/SystemZ/foldmemop-imm-02.mir | 4 +-
.../test/CodeGen/SystemZ/foldmemop-vec-cc.mir | 2 +-
.../CodeGen/SystemZ/foldmemop-vec-cmp.mir | 8 +-
.../CodeGen/SystemZ/foldmemop-vec-unary.mir | 8 +-
llvm/test/CodeGen/SystemZ/fp-move-02.ll | 102 +--
llvm/test/CodeGen/SystemZ/frame-22.ll | 20 +-
llvm/test/CodeGen/SystemZ/int-uadd-03.ll | 4 +-
llvm/test/CodeGen/SystemZ/int-usub-03.ll | 4 +-
.../vector-constrained-fp-intrinsics.ll | 852 +++++++++---------
.../CodeGen/SystemZ/zos-prologue-epilog.ll | 32 +-
16 files changed, 695 insertions(+), 645 deletions(-)
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
index ab2e5b3c9a190..303e902bf108a 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -346,6 +346,52 @@ Register SystemZInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore);
}
+Register SystemZInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
+ int &FrameIndex) const {
+ // if this is not a simple load from memory, it's not a load from stack slot
+ // either.
+ const MCInstrDesc &MCID = MI.getDesc();
+ if (!(MCID.TSFlags & SystemZII::SimpleBDXLoad))
+ return 0;
+
+ // This version of isLoadFromStackSlot should only be used post frame-index
+ // elimination.
+ assert(!MI.getOperand(1).isFI());
+
+ // Now attempt to derive frame index from MachineMemOperands.
+ SmallVector<const MachineMemOperand *, 1> Accesses;
+ if (hasLoadFromStackSlot(MI, Accesses)) {
+ FrameIndex =
+ cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
+ ->getFrameIndex();
+ return MI.getOperand(0).getReg();
+ }
+ return 0;
+}
+
+Register SystemZInstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
+ int &FrameIndex) const {
+ // if this is not a simple store to memory, it's not a store to stack slot
+ // either.
+ const MCInstrDesc &MCID = MI.getDesc();
+ if (!(MCID.TSFlags & SystemZII::SimpleBDXStore))
+ return 0;
+
+ // This version of isStoreToStackSlot should only be used post frame-index
+ // elimination.
+ assert(!MI.getOperand(1).isFI());
+
+ // Now attempt to derive frame index from MachineMemOperands.
+ SmallVector<const MachineMemOperand *, 1> Accesses;
+ if (hasStoreToStackSlot(MI, Accesses)) {
+ FrameIndex =
+ cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
+ ->getFrameIndex();
+ return MI.getOperand(0).getReg();
+ }
+ return 0;
+}
+
bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr &MI,
int &DestFrameIndex,
int &SrcFrameIndex) const {
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.h b/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
index 5f09ad508905d..510442055362a 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
@@ -232,6 +232,10 @@ class SystemZInstrInfo : public SystemZGenInstrInfo {
int &FrameIndex) const override;
Register isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
+ Register isLoadFromStackSlotPostFE(const MachineInstr &MI,
+ int &FrameIndex) const override;
+ Register isStoreToStackSlotPostFE(const MachineInstr &MI,
+ int &FrameIndex) const override;
bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex,
int &SrcFrameIndex) const override;
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
diff --git a/llvm/test/CodeGen/SystemZ/builtin-setjmp-alloca.ll b/llvm/test/CodeGen/SystemZ/builtin-setjmp-alloca.ll
index f714599f9a8f2..9bd6ff8eea4c5 100644
--- a/llvm/test/CodeGen/SystemZ/builtin-setjmp-alloca.ll
+++ b/llvm/test/CodeGen/SystemZ/builtin-setjmp-alloca.ll
@@ -30,14 +30,14 @@ define signext i32 @foo() "frame-pointer"="all" {
; CHECK-NEXT: .cfi_def_cfa_offset 400
; CHECK-NEXT: lgr %r11, %r15
; CHECK-NEXT: .cfi_def_cfa_register %r11
-; CHECK-NEXT: std %f8, 232(%r11) # 8-byte Folded Spill
-; CHECK-NEXT: std %f9, 224(%r11) # 8-byte Folded Spill
-; CHECK-NEXT: std %f10, 216(%r11) # 8-byte Folded Spill
-; CHECK-NEXT: std %f11, 208(%r11) # 8-byte Folded Spill
-; CHECK-NEXT: std %f12, 200(%r11) # 8-byte Folded Spill
-; CHECK-NEXT: std %f13, 192(%r11) # 8-byte Folded Spill
-; CHECK-NEXT: std %f14, 184(%r11) # 8-byte Folded Spill
-; CHECK-NEXT: std %f15, 176(%r11) # 8-byte Folded Spill
+; CHECK-NEXT: std %f8, 232(%r11) # 8-byte Spill
+; CHECK-NEXT: std %f9, 224(%r11) # 8-byte Spill
+; CHECK-NEXT: std %f10, 216(%r11) # 8-byte Spill
+; CHECK-NEXT: std %f11, 208(%r11) # 8-byte Spill
+; CHECK-NEXT: std %f12, 200(%r11) # 8-byte Spill
+; CHECK-NEXT: std %f13, 192(%r11) # 8-byte Spill
+; CHECK-NEXT: std %f14, 184(%r11) # 8-byte Spill
+; CHECK-NEXT: std %f15, 176(%r11) # 8-byte Spill
; CHECK-NEXT: .cfi_offset %f8, -168
; CHECK-NEXT: .cfi_offset %f9, -176
; CHECK-NEXT: .cfi_offset %f10, -184
@@ -59,14 +59,14 @@ define signext i32 @foo() "frame-pointer"="all" {
; CHECK-NEXT: .LBB0_2: # %entry
; CHECK-NEXT: lg %r1, 168(%r11)
; CHECK-NEXT: lgf %r2, 0(%r1)
-; CHECK-NEXT: ld %f8, 232(%r11) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f9, 224(%r11) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f10, 216(%r11) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f11, 208(%r11) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f12, 200(%r11) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f13, 192(%r11) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f14, 184(%r11) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f15, 176(%r11) # 8-byte Folded Reload
+; CHECK-NEXT: ld %f8, 232(%r11) # 8-byte Reload
+; CHECK-NEXT: ld %f9, 224(%r11) # 8-byte Reload
+; CHECK-NEXT: ld %f10, 216(%r11) # 8-byte Reload
+; CHECK-NEXT: ld %f11, 208(%r11) # 8-byte Reload
+; CHECK-NEXT: ld %f12, 200(%r11) # 8-byte Reload
+; CHECK-NEXT: ld %f13, 192(%r11) # 8-byte Reload
+; CHECK-NEXT: ld %f14, 184(%r11) # 8-byte Reload
+; CHECK-NEXT: ld %f15, 176(%r11) # 8-byte Reload
; CHECK-NEXT: lmg %r6, %r15, 288(%r11)
; CHECK-NEXT: br %r14
entry:
@@ -101,14 +101,14 @@ define signext i32 @foo1() "backchain" "frame-pointer"="all" {
; CHECK-NEXT: stg %r1, 0(%r15)
; CHECK-NEXT: lgr %r11, %r15
; CHECK-NEXT: .cfi_def_cfa_register %r11
-; CHECK-NEXT: std %f8, 232(%r11) # 8-byte Folded Spill
-; CHECK-NEXT: std %f9, 224(%r11) # 8-byte Folded Spill
-; CHECK-NEXT: std %f10, 216(%r11) # 8-byte Folded Spill
-; CHECK-NEXT: std %f11, 208(%r11) # 8-byte Folded Spill
-; CHECK-NEXT: std %f12, 200(%r11) # 8-byte Folded Spill
-; CHECK-NEXT: std %f13, 192(%r11) # 8-byte Folded Spill
-; CHECK-NEXT: std %f14, 184(%r11) # 8-byte Folded Spill
-; CHECK-NEXT: std %f15, 176(%r11) # 8-byte Folded Spill
+; CHECK-NEXT: std %f8, 232(%r11) # 8-byte Spill
+; CHECK-NEXT: std %f9, 224(%r11) # 8-byte Spill
+; CHECK-NEXT: std %f10, 216(%r11) # 8-byte Spill
+; CHECK-NEXT: std %f11, 208(%r11) # 8-byte Spill
+; CHECK-NEXT: std %f12, 200(%r11) # 8-byte Spill
+; CHECK-NEXT: std %f13, 192(%r11) # 8-byte Spill
+; CHECK-NEXT: std %f14, 184(%r11) # 8-byte Spill
+; CHECK-NEXT: std %f15, 176(%r11) # 8-byte Spill
; CHECK-NEXT: .cfi_offset %f8, -168
; CHECK-NEXT: .cfi_offset %f9, -176
; CHECK-NEXT: .cfi_offset %f10, -184
@@ -132,14 +132,14 @@ define signext i32 @foo1() "backchain" "frame-pointer"="all" {
; CHECK-NEXT: .LBB1_2: # %entry
; CHECK-NEXT: lg %r1, 168(%r11)
; CHECK-NEXT: lgf %r2, 0(%r1)
-; CHECK-NEXT: ld %f8, 232(%r11) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f9, 224(%r11) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f10, 216(%r11) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f11, 208(%r11) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f12, 200(%r11) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f13, 192(%r11) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f14, 184(%r11) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f15, 176(%r11) # 8-byte Folded Reload
+; CHECK-NEXT: ld %f8, 232(%r11) # 8-byte Reload
+; CHECK-NEXT: ld %f9, 224(%r11) # 8-byte Reload
+; CHECK-NEXT: ld %f10, 216(%r11) # 8-byte Reload
+; CHECK-NEXT: ld %f11, 208(%r11) # 8-byte Reload
+; CHECK-NEXT: ld %f12, 200(%r11) # 8-byte Reload
+; CHECK-NEXT: ld %f13, 192(%r11) # 8-byte Reload
+; CHECK-NEXT: ld %f14, 184(%r11) # 8-byte Reload
+; CHECK-NEXT: ld %f15, 176(%r11) # 8-byte Reload
; CHECK-NEXT: lmg %r6, %r15, 288(%r11)
; CHECK-NEXT: br %r14
entry:
diff --git a/llvm/test/CodeGen/SystemZ/builtin-setjmp-spills.ll b/llvm/test/CodeGen/SystemZ/builtin-setjmp-spills.ll
index 65657ec9f1826..5626f45ac8bbb 100644
--- a/llvm/test/CodeGen/SystemZ/builtin-setjmp-spills.ll
+++ b/llvm/test/CodeGen/SystemZ/builtin-setjmp-spills.ll
@@ -49,14 +49,14 @@ define signext i32 @func() {
; CHECK-NEXT: .cfi_offset %r15, -40
; CHECK-NEXT: aghi %r15, -384
; CHECK-NEXT: .cfi_def_cfa_offset 544
-; CHECK-NEXT: std %f8, 376(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f9, 368(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f10, 360(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f11, 352(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f12, 344(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f13, 336(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f14, 328(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f15, 320(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: std %f8, 376(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f9, 368(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f10, 360(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f11, 352(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f12, 344(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f13, 336(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f14, 328(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f15, 320(%r15) # 8-byte Spill
; CHECK-NEXT: .cfi_offset %f8, -168
; CHECK-NEXT: .cfi_offset %f9, -176
; CHECK-NEXT: .cfi_offset %f10, -184
@@ -67,64 +67,64 @@ define signext i32 @func() {
; CHECK-NEXT: .cfi_offset %f15, -224
; CHECK-NEXT: lgrl %r1, t at GOT
; CHECK-NEXT: lgrl %r2, s at GOT
-; CHECK-NEXT: stg %r1, 312(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r1, 312(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r1), 1
; CHECK-NEXT: lgrl %r1, r at GOT
; CHECK-NEXT: lgrl %r3, q at GOT
-; CHECK-NEXT: stg %r2, 304(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r2, 304(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r2), 1
; CHECK-NEXT: lgrl %r2, p at GOT
-; CHECK-NEXT: stg %r1, 296(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r1, 296(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r1), 1
-; CHECK-NEXT: stg %r3, 288(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r3, 288(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r3), 1
; CHECK-NEXT: lgrl %r1, o at GOT
-; CHECK-NEXT: stg %r2, 280(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r2, 280(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r2), 1
; CHECK-NEXT: lgrl %r2, n at GOT
; CHECK-NEXT: lgrl %r3, m at GOT
-; CHECK-NEXT: stg %r1, 272(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r1, 272(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r1), 1
; CHECK-NEXT: lgrl %r1, l at GOT
-; CHECK-NEXT: stg %r2, 264(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r2, 264(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r2), 1
-; CHECK-NEXT: stg %r3, 256(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r3, 256(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r3), 1
; CHECK-NEXT: lgrl %r2, k at GOT
-; CHECK-NEXT: stg %r1, 248(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r1, 248(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r1), 1
; CHECK-NEXT: lgrl %r1, j at GOT
; CHECK-NEXT: lgrl %r3, i at GOT
-; CHECK-NEXT: stg %r2, 240(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r2, 240(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r2), 1
; CHECK-NEXT: lgrl %r2, h at GOT
-; CHECK-NEXT: stg %r1, 232(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r1, 232(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r1), 1
-; CHECK-NEXT: stg %r3, 224(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r3, 224(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r3), 1
; CHECK-NEXT: lgrl %r1, g at GOT
-; CHECK-NEXT: stg %r2, 216(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r2, 216(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r2), 1
; CHECK-NEXT: lgrl %r2, f at GOT
; CHECK-NEXT: lgrl %r3, e at GOT
-; CHECK-NEXT: stg %r1, 208(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r1, 208(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r1), 1
; CHECK-NEXT: lgrl %r1, d at GOT
-; CHECK-NEXT: stg %r2, 200(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r2, 200(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r2), 1
-; CHECK-NEXT: stg %r3, 192(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r3, 192(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r3), 1
; CHECK-NEXT: lgrl %r2, c at GOT
-; CHECK-NEXT: stg %r1, 184(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r1, 184(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r1), 1
; CHECK-NEXT: lgrl %r3, b at GOT
; CHECK-NEXT: lgrl %r4, a at GOT
-; CHECK-NEXT: stg %r2, 176(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r2, 176(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r2), 1
; CHECK-NEXT: lgrl %r1, buf at GOT
-; CHECK-NEXT: stg %r3, 168(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r3, 168(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r3), 1
-; CHECK-NEXT: stg %r4, 160(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r4, 160(%r15) # 8-byte Spill
; CHECK-NEXT: mvhi 0(%r4), 1
; CHECK-NEXT: larl %r0, .LBB0_2
; CHECK-NEXT: stg %r0, 8(%r1)
@@ -136,55 +136,55 @@ define signext i32 @func() {
; CHECK-NEXT: # %entry
; CHECK-NEXT: lhi %r0, 1
; CHECK-NEXT: .LBB0_3: # %entry
-; CHECK-NEXT: lg %r1, 160(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 160(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 168(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 168(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 176(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 176(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 184(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 184(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 192(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 192(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 200(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 200(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 208(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 208(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 216(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 216(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 224(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 224(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 232(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 232(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 240(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 240(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 248(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 248(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 256(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 256(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 264(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 264(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 272(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 272(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 280(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 280(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 288(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 288(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 296(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 296(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 304(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 304(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
-; CHECK-NEXT: lg %r1, 312(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r1, 312(%r15) # 8-byte Reload
; CHECK-NEXT: a %r0, 0(%r1)
; CHECK-NEXT: lgfr %r2, %r0
-; CHECK-NEXT: ld %f8, 376(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f9, 368(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f10, 360(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f11, 352(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f12, 344(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f13, 336(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f14, 328(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f15, 320(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: ld %f8, 376(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f9, 368(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f10, 360(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f11, 352(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f12, 344(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f13, 336(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f14, 328(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f15, 320(%r15) # 8-byte Reload
; CHECK-NEXT: lmg %r6, %r15, 432(%r15)
; CHECK-NEXT: br %r14
entry:
diff --git a/llvm/test/CodeGen/SystemZ/builtin-setjmp.ll b/llvm/test/CodeGen/SystemZ/builtin-setjmp.ll
index cfeba5a051ad1..37706c7bcd212 100644
--- a/llvm/test/CodeGen/SystemZ/builtin-setjmp.ll
+++ b/llvm/test/CodeGen/SystemZ/builtin-setjmp.ll
@@ -26,14 +26,14 @@ define void @foo() {
; CHECK-NEXT: .cfi_offset %r15, -40
; CHECK-NEXT: aghi %r15, -64
; CHECK-NEXT: .cfi_def_cfa_offset 224
-; CHECK-NEXT: std %f8, 56(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f9, 48(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f10, 40(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f11, 32(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f12, 24(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f13, 16(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f14, 8(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f15, 0(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: std %f8, 56(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f9, 48(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f10, 40(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f11, 32(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f12, 24(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f13, 16(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f14, 8(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f15, 0(%r15) # 8-byte Spill
; CHECK-NEXT: .cfi_offset %f8, -168
; CHECK-NEXT: .cfi_offset %f9, -176
; CHECK-NEXT: .cfi_offset %f10, -184
@@ -49,14 +49,14 @@ define void @foo() {
; CHECK-NEXT: .LBB0_1: # Block address taken
; CHECK-NEXT: # %entry
; CHECK-NEXT: .LBB0_2: # %entry
-; CHECK-NEXT: ld %f8, 56(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f9, 48(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f10, 40(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f11, 32(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f12, 24(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f13, 16(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f14, 8(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f15, 0(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: ld %f8, 56(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f9, 48(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f10, 40(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f11, 32(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f12, 24(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f13, 16(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f14, 8(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f15, 0(%r15) # 8-byte Reload
; CHECK-NEXT: lmg %r6, %r15, 112(%r15)
; CHECK-NEXT: br %r14
entry:
@@ -82,14 +82,14 @@ define void @foo1() "backchain" {
; CHECK-NEXT: aghi %r15, -64
; CHECK-NEXT: .cfi_def_cfa_offset 224
; CHECK-NEXT: stg %r1, 0(%r15)
-; CHECK-NEXT: std %f8, 56(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f9, 48(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f10, 40(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f11, 32(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f12, 24(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f13, 16(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f14, 8(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f15, 0(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: std %f8, 56(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f9, 48(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f10, 40(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f11, 32(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f12, 24(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f13, 16(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f14, 8(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f15, 0(%r15) # 8-byte Spill
; CHECK-NEXT: .cfi_offset %f8, -168
; CHECK-NEXT: .cfi_offset %f9, -176
; CHECK-NEXT: .cfi_offset %f10, -184
@@ -107,14 +107,14 @@ define void @foo1() "backchain" {
; CHECK-NEXT: .LBB1_1: # Block address taken
; CHECK-NEXT: # %entry
; CHECK-NEXT: .LBB1_2: # %entry
-; CHECK-NEXT: ld %f8, 56(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f9, 48(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f10, 40(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f11, 32(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f12, 24(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f13, 16(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f14, 8(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f15, 0(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: ld %f8, 56(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f9, 48(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f10, 40(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f11, 32(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f12, 24(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f13, 16(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f14, 8(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f15, 0(%r15) # 8-byte Reload
; CHECK-NEXT: lmg %r6, %r15, 112(%r15)
; CHECK-NEXT: br %r14
entry:
diff --git a/llvm/test/CodeGen/SystemZ/fmuladd-soft-float.ll b/llvm/test/CodeGen/SystemZ/fmuladd-soft-float.ll
index d0bfe74719f89..1447c576f48ae 100644
--- a/llvm/test/CodeGen/SystemZ/fmuladd-soft-float.ll
+++ b/llvm/test/CodeGen/SystemZ/fmuladd-soft-float.ll
@@ -101,9 +101,9 @@ define <4 x float> @fmuladd_contract_v4f32(<4 x float> %a, <4 x float> %b, <4 x
; SOFT-FLOAT-NEXT: aghi %r15, -176
; SOFT-FLOAT-NEXT: .cfi_def_cfa_offset 336
; SOFT-FLOAT-NEXT: llgf %r0, 388(%r15)
-; SOFT-FLOAT-NEXT: stg %r0, 168(%r15) # 8-byte Folded Spill
+; SOFT-FLOAT-NEXT: stg %r0, 168(%r15) # 8-byte Spill
; SOFT-FLOAT-NEXT: llgf %r0, 380(%r15)
-; SOFT-FLOAT-NEXT: stg %r0, 160(%r15) # 8-byte Folded Spill
+; SOFT-FLOAT-NEXT: stg %r0, 160(%r15) # 8-byte Spill
; SOFT-FLOAT-NEXT: llgf %r11, 372(%r15)
; SOFT-FLOAT-NEXT: llgf %r10, 364(%r15)
; SOFT-FLOAT-NEXT: llgf %r8, 340(%r15)
@@ -139,11 +139,11 @@ define <4 x float> @fmuladd_contract_v4f32(<4 x float> %a, <4 x float> %b, <4 x
; SOFT-FLOAT-NEXT: brasl %r14, __addsf3 at PLT
; SOFT-FLOAT-NEXT: lgr %r12, %r2
; SOFT-FLOAT-NEXT: lgr %r2, %r13
-; SOFT-FLOAT-NEXT: lg %r3, 160(%r15) # 8-byte Folded Reload
+; SOFT-FLOAT-NEXT: lg %r3, 160(%r15) # 8-byte Reload
; SOFT-FLOAT-NEXT: brasl %r14, __addsf3 at PLT
; SOFT-FLOAT-NEXT: lgr %r13, %r2
; SOFT-FLOAT-NEXT: lgr %r2, %r9
-; SOFT-FLOAT-NEXT: lg %r3, 168(%r15) # 8-byte Folded Reload
+; SOFT-FLOAT-NEXT: lg %r3, 168(%r15) # 8-byte Reload
; SOFT-FLOAT-NEXT: brasl %r14, __addsf3 at PLT
; SOFT-FLOAT-NEXT: lgr %r5, %r2
; SOFT-FLOAT-NEXT: lr %r2, %r10
@@ -203,15 +203,15 @@ define <4 x double> @fmuladd_contract_v4f64(<4 x double> %a, <4 x double> %b, <4
; SOFT-FLOAT-NEXT: brasl %r14, __adddf3 at PLT
; SOFT-FLOAT-NEXT: lgr %r10, %r2
; SOFT-FLOAT-NEXT: lgr %r2, %r12
-; SOFT-FLOAT-NEXT: lg %r3, 160(%r15) # 8-byte Folded Reload
+; SOFT-FLOAT-NEXT: lg %r3, 160(%r15) # 8-byte Reload
; SOFT-FLOAT-NEXT: brasl %r14, __adddf3 at PLT
; SOFT-FLOAT-NEXT: lgr %r12, %r2
; SOFT-FLOAT-NEXT: lgr %r2, %r13
-; SOFT-FLOAT-NEXT: lg %r3, 168(%r15) # 8-byte Folded Reload
+; SOFT-FLOAT-NEXT: lg %r3, 168(%r15) # 8-byte Reload
; SOFT-FLOAT-NEXT: brasl %r14, __adddf3 at PLT
; SOFT-FLOAT-NEXT: lgr %r13, %r2
; SOFT-FLOAT-NEXT: lgr %r2, %r11
-; SOFT-FLOAT-NEXT: lg %r3, 176(%r15) # 8-byte Folded Reload
+; SOFT-FLOAT-NEXT: lg %r3, 176(%r15) # 8-byte Reload
; SOFT-FLOAT-NEXT: brasl %r14, __adddf3 at PLT
; SOFT-FLOAT-NEXT: lgr %r5, %r2
; SOFT-FLOAT-NEXT: lgr %r2, %r10
diff --git a/llvm/test/CodeGen/SystemZ/foldmemop-imm-02.mir b/llvm/test/CodeGen/SystemZ/foldmemop-imm-02.mir
index 653bb42e1cad2..d5ebbaabdceba 100644
--- a/llvm/test/CodeGen/SystemZ/foldmemop-imm-02.mir
+++ b/llvm/test/CodeGen/SystemZ/foldmemop-imm-02.mir
@@ -125,7 +125,7 @@ body: |
# CHECK: mvhi 160(%r15), 0 # 4-byte Folded Spill
# CHECK: mvc 160(4,%r15), 0(%r2) # 4-byte Folded Spill
# CHECK-LABEL: .LBB2_2:
-# CHECK: l %r0, 160(%r15) # 4-byte Folded Reload
+# CHECK: l %r0, 160(%r15) # 4-byte Reload
# CHECK: clfi %r0, 65536
---
name: fun2
@@ -182,7 +182,7 @@ body: |
# CHECK: mvghi 160(%r15), 0 # 8-byte Folded Spill
# CHECK: mvc 160(8,%r15), 0(%r2) # 8-byte Folded Spill
# CHECK-LABEL: .LBB3_2:
-# CHECK: lg %r0, 160(%r15) # 8-byte Folded Reload
+# CHECK: lg %r0, 160(%r15) # 8-byte Reload
# CHECK: clgfi %r0, 65536
---
name: fun3
diff --git a/llvm/test/CodeGen/SystemZ/foldmemop-vec-cc.mir b/llvm/test/CodeGen/SystemZ/foldmemop-vec-cc.mir
index dbdd3a0a21bf1..5d0663c95e532 100644
--- a/llvm/test/CodeGen/SystemZ/foldmemop-vec-cc.mir
+++ b/llvm/test/CodeGen/SystemZ/foldmemop-vec-cc.mir
@@ -9,7 +9,7 @@
...
# CHECK-LABEL: fun0:
-# CHECK: ld %f1, 160(%r15) # 8-byte Folded Reload
+# CHECK: ld %f1, 160(%r15) # 8-byte Reload
# CHECK-NEXT: wfadb %f0, %f0, %f1
---
name: fun0
diff --git a/llvm/test/CodeGen/SystemZ/foldmemop-vec-cmp.mir b/llvm/test/CodeGen/SystemZ/foldmemop-vec-cmp.mir
index 1344809651ad7..86c23643c0acb 100644
--- a/llvm/test/CodeGen/SystemZ/foldmemop-vec-cmp.mir
+++ b/llvm/test/CodeGen/SystemZ/foldmemop-vec-cmp.mir
@@ -94,7 +94,7 @@ body: |
# CDB can't be used if one operand is a VR64 (and not FP64).
# CHECK-LABEL: fun2:
-# CHECK: ld %f0, 160(%r15) # 8-byte Folded Reload
+# CHECK: ld %f0, 160(%r15) # 8-byte Reload
# CHECK-NEXT: wfcdb %v16, %f0
---
name: fun2
@@ -204,7 +204,7 @@ body: |
# CEB can't be used if one operand is a VR32 (and not FP32).
# CHECK-LABEL: fun5:
-# CHECK: lde %f0, 164(%r15) # 4-byte Folded Reload
+# CHECK: lde %f0, 164(%r15) # 4-byte Reload
# CHECK-NEXT: wfcsb %v16, %f0
---
name: fun5
@@ -314,7 +314,7 @@ body: |
# KDB can't be used if one operand is a VR64 (and not FP64).
# CHECK-LABEL: fun8:
-# CHECK: ld %f0, 160(%r15) # 8-byte Folded Reload
+# CHECK: ld %f0, 160(%r15) # 8-byte Reload
# CHECK-NEXT: wfkdb %v16, %f0
---
name: fun8
@@ -424,7 +424,7 @@ body: |
# CEB can't be used if one operand is a VR32 (and not FP32).
# CHECK-LABEL: fun11:
-# CHECK: lde %f0, 164(%r15) # 4-byte Folded Reload
+# CHECK: lde %f0, 164(%r15) # 4-byte Reload
# CHECK-NEXT: wfksb %v16, %f0
---
name: fun11
diff --git a/llvm/test/CodeGen/SystemZ/foldmemop-vec-unary.mir b/llvm/test/CodeGen/SystemZ/foldmemop-vec-unary.mir
index e811cb9ddc993..86b3b0297454a 100644
--- a/llvm/test/CodeGen/SystemZ/foldmemop-vec-unary.mir
+++ b/llvm/test/CodeGen/SystemZ/foldmemop-vec-unary.mir
@@ -44,7 +44,7 @@ body: |
# LDEB can't be used if dst operand is a VR32 (and not FP32).
# CHECK-LABEL: fun1:
-# CHECK: lde %f0, 164(%r15) # 4-byte Folded Reload
+# CHECK: lde %f0, 164(%r15) # 4-byte Reload
# CHECK-NEXT: wldeb %v16, %f0
---
name: fun1
@@ -75,7 +75,7 @@ body: |
# Spilling the destination of an fp extension needs an extra store instruction.
# CHECK-LABEL: fun2:
# CHECK: ldebr %f0, %f0
-# CHECK-NEXT: std %f0, 160(%r15) # 8-byte Folded Spill
+# CHECK-NEXT: std %f0, 160(%r15) # 8-byte Spill
---
name: fun2
alignment: 16
@@ -130,7 +130,7 @@ body: |
# SQDB can't be used if dst operand is a VR64 (and not FP64).
# CHECK-LABEL: fun4:
-# CHECK: ld %f0, 160(%r15) # 8-byte Folded Reload
+# CHECK: ld %f0, 160(%r15) # 8-byte Reload
# CHECK-NEXT: wfsqdb %v16, %f0
---
name: fun4
@@ -187,7 +187,7 @@ body: |
# SQEB can't be used if dst operand is a VR32 (and not FP32).
# CHECK-LABEL: fun6:
-# CHECK: lde %f0, 164(%r15) # 4-byte Folded Reload
+# CHECK: lde %f0, 164(%r15) # 4-byte Reload
# CHECK-NEXT: wfsqsb %v16, %f0
---
name: fun6
diff --git a/llvm/test/CodeGen/SystemZ/fp-move-02.ll b/llvm/test/CodeGen/SystemZ/fp-move-02.ll
index 9df852dccbc82..7f7ac7cda83d3 100644
--- a/llvm/test/CodeGen/SystemZ/fp-move-02.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-move-02.ll
@@ -156,9 +156,9 @@ define void @f10(double %extra) {
; CHECK-NEXT: adb %f2, 0(%r1)
; CHECK-NEXT: ldr %f3, %f0
; CHECK-NEXT: adb %f3, 0(%r1)
-; CHECK-NEXT: std %f1, 176(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f2, 168(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f3, 160(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: std %f1, 176(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f2, 168(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f3, 160(%r15) # 8-byte Spill
; CHECK-NEXT: ldr %f1, %f0
; CHECK-NEXT: adb %f1, 0(%r1)
; CHECK-NEXT: ldr %f2, %f0
@@ -263,14 +263,14 @@ define void @f11(i64 %mask) {
; CHECK-NEXT: .cfi_offset %r15, -40
; CHECK-NEXT: aghi %r15, -240
; CHECK-NEXT: .cfi_def_cfa_offset 400
-; CHECK-NEXT: std %f8, 232(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f9, 224(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f10, 216(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f11, 208(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f12, 200(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f13, 192(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f14, 184(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f15, 176(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: std %f8, 232(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f9, 224(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f10, 216(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f11, 208(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f12, 200(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f13, 192(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f14, 184(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f15, 176(%r15) # 8-byte Spill
; CHECK-NEXT: .cfi_offset %f8, -168
; CHECK-NEXT: .cfi_offset %f9, -176
; CHECK-NEXT: .cfi_offset %f10, -184
@@ -286,8 +286,8 @@ define void @f11(i64 %mask) {
; CHECK-NEXT: ng %r3, 0(%r1)
; CHECK-NEXT: lgr %r4, %r2
; CHECK-NEXT: ng %r4, 0(%r1)
-; CHECK-NEXT: stg %r0, 168(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: stg %r3, 160(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r0, 168(%r15) # 8-byte Spill
+; CHECK-NEXT: stg %r3, 160(%r15) # 8-byte Spill
; CHECK-NEXT: lgr %r0, %r2
; CHECK-NEXT: ng %r0, 0(%r1)
; CHECK-NEXT: ldgr %f10, %r4
@@ -329,14 +329,14 @@ define void @f11(i64 %mask) {
; CHECK-NEXT: std %f1, 0(%r13)
; CHECK-NEXT: jlh .LBB10_1
; CHECK-NEXT: # %bb.2: # %exit
-; CHECK-NEXT: ld %f8, 232(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f9, 224(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f10, 216(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f11, 208(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f12, 200(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f13, 192(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f14, 184(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f15, 176(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: ld %f8, 232(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f9, 224(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f10, 216(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f11, 208(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f12, 200(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f13, 192(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f14, 184(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f15, 176(%r15) # 8-byte Reload
; CHECK-NEXT: lmg %r12, %r15, 336(%r15)
; CHECK-NEXT: br %r14
entry:
@@ -431,12 +431,12 @@ define void @f12() {
; CHECK-NEXT: agr %r9, %r2
; CHECK-NEXT: agr %r10, %r2
; CHECK-NEXT: agr %r11, %r2
-; CHECK-NEXT: lg %r0, 160(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r0, 160(%r15) # 8-byte Reload
; CHECK-NEXT: agr %r0, %r2
-; CHECK-NEXT: stg %r0, 160(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: lg %r0, 168(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: stg %r0, 160(%r15) # 8-byte Spill
+; CHECK-NEXT: lg %r0, 168(%r15) # 8-byte Reload
; CHECK-NEXT: agr %r0, %r2
-; CHECK-NEXT: stg %r0, 168(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: stg %r0, 168(%r15) # 8-byte Spill
; CHECK-NEXT: cgijlh %r2, 1, .LBB11_1
; CHECK-NEXT: # %bb.2: # %exit
; CHECK-NEXT: brasl %r14, foo at PLT
@@ -466,10 +466,10 @@ define void @f12() {
; CHECK-NEXT: ldgr %f1, %r11
; CHECK-NEXT: mdbr %f1, %f0
; CHECK-NEXT: std %f1, 0(%r1)
-; CHECK-NEXT: ld %f1, 160(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: ld %f1, 160(%r15) # 8-byte Reload
; CHECK-NEXT: mdbr %f1, %f0
; CHECK-NEXT: std %f1, 0(%r1)
-; CHECK-NEXT: ld %f1, 168(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: ld %f1, 168(%r15) # 8-byte Reload
; CHECK-NEXT: mdbr %f1, %f0
; CHECK-NEXT: std %f1, 0(%r1)
; CHECK-NEXT: brasl %r14, foo at PLT
@@ -554,14 +554,14 @@ define void @f13() {
; CHECK-NEXT: .cfi_offset %r15, -40
; CHECK-NEXT: aghi %r15, -240
; CHECK-NEXT: .cfi_def_cfa_offset 400
-; CHECK-NEXT: std %f8, 232(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f9, 224(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f10, 216(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f11, 208(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f12, 200(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f13, 192(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f14, 184(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f15, 176(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: std %f8, 232(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f9, 224(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f10, 216(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f11, 208(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f12, 200(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f13, 192(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f14, 184(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f15, 176(%r15) # 8-byte Spill
; CHECK-NEXT: .cfi_offset %f8, -168
; CHECK-NEXT: .cfi_offset %f9, -176
; CHECK-NEXT: .cfi_offset %f10, -184
@@ -579,8 +579,8 @@ define void @f13() {
; CHECK-NEXT: ldr %f12, %f8
; CHECK-NEXT: ldr %f11, %f8
; CHECK-NEXT: ldr %f10, %f8
-; CHECK-NEXT: std %f8, 160(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: std %f8, 160(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f8, 168(%r15) # 8-byte Spill
; CHECK-NEXT: .LBB12_1: # %loop
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: brasl %r14, bar at PLT
@@ -593,12 +593,12 @@ define void @f13() {
; CHECK-NEXT: cdb %f0, 0(%r13)
; CHECK-NEXT: mdbr %f11, %f0
; CHECK-NEXT: mdbr %f10, %f0
-; CHECK-NEXT: ld %f1, 160(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: ld %f1, 160(%r15) # 8-byte Reload
; CHECK-NEXT: mdbr %f1, %f0
-; CHECK-NEXT: std %f1, 160(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: ld %f1, 168(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: std %f1, 160(%r15) # 8-byte Spill
+; CHECK-NEXT: ld %f1, 168(%r15) # 8-byte Reload
; CHECK-NEXT: mdbr %f1, %f0
-; CHECK-NEXT: std %f1, 168(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: std %f1, 168(%r15) # 8-byte Spill
; CHECK-NEXT: jlh .LBB12_1
; CHECK-NEXT: # %bb.2: # %exit
; CHECK-NEXT: brasl %r14, foo at PLT
@@ -628,21 +628,21 @@ define void @f13() {
; CHECK-NEXT: lgdr %r2, %f10
; CHECK-NEXT: agr %r2, %r0
; CHECK-NEXT: stg %r2, 0(%r1)
-; CHECK-NEXT: lg %r2, 160(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r2, 160(%r15) # 8-byte Reload
; CHECK-NEXT: agr %r2, %r0
; CHECK-NEXT: stg %r2, 0(%r1)
-; CHECK-NEXT: lg %r2, 168(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: lg %r2, 168(%r15) # 8-byte Reload
; CHECK-NEXT: agr %r2, %r0
; CHECK-NEXT: stg %r2, 0(%r1)
; CHECK-NEXT: brasl %r14, foo at PLT
-; CHECK-NEXT: ld %f8, 232(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f9, 224(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f10, 216(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f11, 208(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f12, 200(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f13, 192(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f14, 184(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f15, 176(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: ld %f8, 232(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f9, 224(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f10, 216(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f11, 208(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f12, 200(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f13, 192(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f14, 184(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f15, 176(%r15) # 8-byte Reload
; CHECK-NEXT: lmg %r13, %r15, 344(%r15)
; CHECK-NEXT: br %r14
entry:
diff --git a/llvm/test/CodeGen/SystemZ/frame-22.ll b/llvm/test/CodeGen/SystemZ/frame-22.ll
index fd4e3fec508a9..80e3a2f5ce930 100644
--- a/llvm/test/CodeGen/SystemZ/frame-22.ll
+++ b/llvm/test/CodeGen/SystemZ/frame-22.ll
@@ -8,11 +8,11 @@ define void @f1() #0 {
; CHECK: stmg %r12, %r15, 128(%r15)
; CHECK-NEXT: .cfi_offset %r12, -32
; CHECK-NEXT: .cfi_offset %r15, -8
-; CHECK-NEXT: std %f8, 120(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: std %f8, 120(%r15) # 8-byte Spill
; CHECK-NEXT: .cfi_offset %f8, -40
; CHECK-NEXT: #APP
; CHECK-NEXT: #NO_APP
-; CHECK-NEXT: ld %f8, 120(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: ld %f8, 120(%r15) # 8-byte Reload
; CHECK-NEXT: lmg %r12, %r15, 128(%r15)
; CHECK-NEXT: br %r14
call void asm sideeffect "", "~{f8},~{r12}"() nounwind
@@ -25,14 +25,14 @@ define anyregcc void @f2() #0 {
; CHECK: stmg %r3, %r15, 56(%r15)
; CHECK-NEXT: .cfi_offset %r3, -104
; CHECK-NEXT: .cfi_offset %r15, -8
-; CHECK-NEXT: std %f0, 48(%r15) # 8-byte Folded Spill
-; CHECK-NEXT: std %f1, 40(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: std %f0, 48(%r15) # 8-byte Spill
+; CHECK-NEXT: std %f1, 40(%r15) # 8-byte Spill
; CHECK-NEXT: .cfi_offset %f0, -112
; CHECK-NEXT: .cfi_offset %f1, -120
; CHECK-NEXT: #APP
; CHECK-NEXT: #NO_APP
-; CHECK-NEXT: ld %f0, 48(%r15) # 8-byte Folded Reload
-; CHECK-NEXT: ld %f1, 40(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: ld %f0, 48(%r15) # 8-byte Reload
+; CHECK-NEXT: ld %f1, 40(%r15) # 8-byte Reload
; CHECK-NEXT: lmg %r3, %r15, 56(%r15)
; CHECK-NEXT: br %r14
call void asm sideeffect "", "~{f0},~{f1},~{r3}"() nounwind
@@ -43,14 +43,14 @@ define anyregcc void @f2() #0 {
define i64 @f3(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f,
double %A, double %B, double %C, double %D, double %E) #0 {
; CHECK-LABEL: f3:
-; CHECK: std %f8, 152(%r15) # 8-byte Folded Spill
+; CHECK: std %f8, 152(%r15) # 8-byte Spill
; CHECK-NEXT: .cfi_offset %f8, -8
; CHECK-NEXT: ld %f0, 168(%r15)
; CHECK-NEXT: cgdbr %r2, 5, %f0
; CHECK-NEXT: ag %r2, 160(%r15)
; CHECK-NEXT: #APP
; CHECK-NEXT: #NO_APP
-; CHECK-NEXT: ld %f8, 152(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: ld %f8, 152(%r15) # 8-byte Reload
; CHECK-NEXT: br %r14
call void asm sideeffect "", "~{f8}"() nounwind
%Ei = fptosi double %E to i64
@@ -67,7 +67,7 @@ define i64 @f4() #0 {
; CHECK-NEXT: .cfi_offset %r15, -8
; CHECK-NEXT: aghi %r15, -104
; CHECK-NEXT: .cfi_def_cfa_offset 264
-; CHECK-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
+; CHECK-NEXT: std %f8, 176(%r15) # 8-byte Spill
; CHECK-NEXT: .cfi_offset %f8, -88
; CHECK-NEXT: #APP
; CHECK-NEXT: #NO_APP
@@ -75,7 +75,7 @@ define i64 @f4() #0 {
; CHECK-NEXT: stg %r0, 168(%r15)
; CHECK: mvghi 160(%r15), 6
; CHECK-NEXT: brasl %r14, f3 at PLT
-; CHECK-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
+; CHECK-NEXT: ld %f8, 176(%r15) # 8-byte Reload
; CHECK-NEXT: lmg %r6, %r15, 184(%r15)
; CHECK-NEXT: br %r14
call void asm sideeffect "", "~{f8}"() nounwind
diff --git a/llvm/test/CodeGen/SystemZ/int-uadd-03.ll b/llvm/test/CodeGen/SystemZ/int-uadd-03.ll
index 4ecbb95b639b1..95b97ac854ec4 100644
--- a/llvm/test/CodeGen/SystemZ/int-uadd-03.ll
+++ b/llvm/test/CodeGen/SystemZ/int-uadd-03.ll
@@ -244,9 +244,9 @@ define zeroext i1 @f11(ptr %ptr0) {
; CHECK-NEXT: a %r11, 56(%r2)
; CHECK-NEXT: lhi %r1, 100
; CHECK-NEXT: a %r1, 64(%r2)
-; CHECK-NEXT: st %r1, 160(%r15) # 4-byte Folded Spill
+; CHECK-NEXT: st %r1, 160(%r15) # 4-byte Spill
; CHECK-NEXT: a %r0, 72(%r2)
-; CHECK-NEXT: st %r0, 164(%r15) # 4-byte Folded Spill
+; CHECK-NEXT: st %r0, 164(%r15) # 4-byte Spill
; CHECK-NEXT: st %r12, 0(%r2)
; CHECK-NEXT: st %r13, 8(%r2)
; CHECK-NEXT: st %r6, 16(%r2)
diff --git a/llvm/test/CodeGen/SystemZ/int-usub-03.ll b/llvm/test/CodeGen/SystemZ/int-usub-03.ll
index 83c3db4199255..af2abf856ff29 100644
--- a/llvm/test/CodeGen/SystemZ/int-usub-03.ll
+++ b/llvm/test/CodeGen/SystemZ/int-usub-03.ll
@@ -252,9 +252,9 @@ define zeroext i1 @f11(ptr %ptr0) {
; CHECK-NEXT: a %r11, 56(%r2)
; CHECK-NEXT: lhi %r1, 100
; CHECK-NEXT: a %r1, 64(%r2)
-; CHECK-NEXT: st %r1, 160(%r15) # 4-byte Folded Spill
+; CHECK-NEXT: st %r1, 160(%r15) # 4-byte Spill
; CHECK-NEXT: a %r0, 72(%r2)
-; CHECK-NEXT: st %r0, 164(%r15) # 4-byte Folded Spill
+; CHECK-NEXT: st %r0, 164(%r15) # 4-byte Spill
; CHECK-NEXT: st %r12, 0(%r2)
; CHECK-NEXT: st %r13, 8(%r2)
; CHECK-NEXT: st %r6, 16(%r2)
diff --git a/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll
index a3e453de913fe..e0818ea3da294 100644
--- a/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll
@@ -226,8 +226,8 @@ define <2 x double> @constrained_vector_frem_v2f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: larl %r1, .LCPI6_0
@@ -243,8 +243,8 @@ define <2 x double> @constrained_vector_frem_v2f64() #0 {
; S390X-NEXT: ldr %f2, %f8
; S390X-NEXT: brasl %r14, fmod at PLT
; S390X-NEXT: ldr %f2, %f9
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -255,7 +255,7 @@ define <2 x double> @constrained_vector_frem_v2f64() #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -184
; SZ13-NEXT: .cfi_def_cfa_offset 344
-; SZ13-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 176(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: larl %r1, .LCPI6_0
; SZ13-NEXT: ld %f8, 0(%r1)
@@ -268,7 +268,7 @@ define <2 x double> @constrained_vector_frem_v2f64() #0 {
; SZ13-NEXT: ldr %f2, %f8
; SZ13-NEXT: brasl %r14, fmod at PLT
; SZ13-NEXT: vl %v1, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 176(%r15) # 8-byte Reload
; SZ13-NEXT: # kill: def $f0d killed $f0d def $v0
; SZ13-NEXT: vmrhg %v24, %v0, %v1
; SZ13-NEXT: lmg %r14, %r15, 296(%r15)
@@ -290,9 +290,9 @@ define <3 x float> @constrained_vector_frem_v3f32() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -316,9 +316,9 @@ define <3 x float> @constrained_vector_frem_v3f32() #0 {
; S390X-NEXT: brasl %r14, fmodf at PLT
; S390X-NEXT: ler %f2, %f10
; S390X-NEXT: ler %f4, %f9
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -329,7 +329,7 @@ define <3 x float> @constrained_vector_frem_v3f32() #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: larl %r1, .LCPI7_0
; SZ13-NEXT: lde %f0, 0(%r1)
@@ -351,7 +351,7 @@ define <3 x float> @constrained_vector_frem_v3f32() #0 {
; SZ13-NEXT: # kill: def $f0s killed $f0s def $v0
; SZ13-NEXT: vmrhf %v0, %v1, %v0
; SZ13-NEXT: vl %v1, 176(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vrepf %v1, %v1, 0
; SZ13-NEXT: vmrhg %v24, %v0, %v1
; SZ13-NEXT: lmg %r14, %r15, 312(%r15)
@@ -374,9 +374,9 @@ define void @constrained_vector_frem_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -402,9 +402,9 @@ define void @constrained_vector_frem_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -416,7 +416,7 @@ define void @constrained_vector_frem_v3f64(ptr %a) #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: vl %v2, 0(%r2), 4
; SZ13-NEXT: ld %f8, 16(%r2)
@@ -442,7 +442,7 @@ define void @constrained_vector_frem_v3f64(ptr %a) #0 {
; SZ13-NEXT: brasl %r14, fmod at PLT
; SZ13-NEXT: std %f0, 16(%r13)
; SZ13-NEXT: vl %v0, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vst %v0, 0(%r13), 4
; SZ13-NEXT: lmg %r13, %r15, 304(%r15)
; SZ13-NEXT: br %r14
@@ -465,10 +465,10 @@ define <4 x double> @constrained_vector_frem_v4f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -192
; S390X-NEXT: .cfi_def_cfa_offset 352
-; S390X-NEXT: std %f8, 184(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f11, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 184(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f11, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -500,10 +500,10 @@ define <4 x double> @constrained_vector_frem_v4f64() #0 {
; S390X-NEXT: ldr %f2, %f11
; S390X-NEXT: ldr %f4, %f10
; S390X-NEXT: ldr %f6, %f9
-; S390X-NEXT: ld %f8, 184(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f11, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 184(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f11, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 304(%r15)
; S390X-NEXT: br %r14
;
@@ -514,7 +514,7 @@ define <4 x double> @constrained_vector_frem_v4f64() #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: larl %r1, .LCPI9_0
; SZ13-NEXT: ld %f8, 0(%r1)
@@ -542,7 +542,7 @@ define <4 x double> @constrained_vector_frem_v4f64() #0 {
; SZ13-NEXT: brasl %r14, fmod at PLT
; SZ13-NEXT: vl %v1, 176(%r15), 3 # 16-byte Folded Reload
; SZ13-NEXT: vl %v24, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: # kill: def $f0d killed $f0d def $v0
; SZ13-NEXT: vmrhg %v26, %v0, %v1
; SZ13-NEXT: lmg %r14, %r15, 312(%r15)
@@ -1252,8 +1252,8 @@ define <2 x double> @constrained_vector_pow_v2f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: larl %r1, .LCPI31_0
@@ -1269,8 +1269,8 @@ define <2 x double> @constrained_vector_pow_v2f64() #0 {
; S390X-NEXT: ldr %f2, %f8
; S390X-NEXT: brasl %r14, pow at PLT
; S390X-NEXT: ldr %f2, %f9
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -1281,7 +1281,7 @@ define <2 x double> @constrained_vector_pow_v2f64() #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -184
; SZ13-NEXT: .cfi_def_cfa_offset 344
-; SZ13-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 176(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: larl %r1, .LCPI31_0
; SZ13-NEXT: ld %f0, 0(%r1)
@@ -1296,7 +1296,7 @@ define <2 x double> @constrained_vector_pow_v2f64() #0 {
; SZ13-NEXT: ldr %f2, %f8
; SZ13-NEXT: brasl %r14, pow at PLT
; SZ13-NEXT: vl %v1, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 176(%r15) # 8-byte Reload
; SZ13-NEXT: # kill: def $f0d killed $f0d def $v0
; SZ13-NEXT: vmrhg %v24, %v0, %v1
; SZ13-NEXT: lmg %r14, %r15, 296(%r15)
@@ -1318,9 +1318,9 @@ define <3 x float> @constrained_vector_pow_v3f32() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -1344,9 +1344,9 @@ define <3 x float> @constrained_vector_pow_v3f32() #0 {
; S390X-NEXT: brasl %r14, powf at PLT
; S390X-NEXT: ler %f2, %f10
; S390X-NEXT: ler %f4, %f9
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -1357,7 +1357,7 @@ define <3 x float> @constrained_vector_pow_v3f32() #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: larl %r1, .LCPI32_0
; SZ13-NEXT: lde %f0, 0(%r1)
@@ -1381,7 +1381,7 @@ define <3 x float> @constrained_vector_pow_v3f32() #0 {
; SZ13-NEXT: # kill: def $f0s killed $f0s def $v0
; SZ13-NEXT: vmrhf %v0, %v1, %v0
; SZ13-NEXT: vl %v1, 176(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vrepf %v1, %v1, 0
; SZ13-NEXT: vmrhg %v24, %v0, %v1
; SZ13-NEXT: lmg %r14, %r15, 312(%r15)
@@ -1404,10 +1404,10 @@ define void @constrained_vector_pow_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -192
; S390X-NEXT: .cfi_def_cfa_offset 352
-; S390X-NEXT: std %f8, 184(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f11, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 184(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f11, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -1431,10 +1431,10 @@ define void @constrained_vector_pow_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f10, 8(%r13)
; S390X-NEXT: std %f11, 16(%r13)
-; S390X-NEXT: ld %f8, 184(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f11, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 184(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f11, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -1446,8 +1446,8 @@ define void @constrained_vector_pow_v3f64(ptr %a) #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -208
; SZ13-NEXT: .cfi_def_cfa_offset 368
-; SZ13-NEXT: std %f8, 200(%r15) # 8-byte Folded Spill
-; SZ13-NEXT: std %f9, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 200(%r15) # 8-byte Spill
+; SZ13-NEXT: std %f9, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: .cfi_offset %f9, -176
; SZ13-NEXT: larl %r1, .LCPI33_0
@@ -1475,8 +1475,8 @@ define void @constrained_vector_pow_v3f64(ptr %a) #0 {
; SZ13-NEXT: brasl %r14, pow at PLT
; SZ13-NEXT: std %f0, 16(%r13)
; SZ13-NEXT: vl %v0, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 200(%r15) # 8-byte Folded Reload
-; SZ13-NEXT: ld %f9, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 200(%r15) # 8-byte Reload
+; SZ13-NEXT: ld %f9, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vst %v0, 0(%r13), 4
; SZ13-NEXT: lmg %r13, %r15, 312(%r15)
; SZ13-NEXT: br %r14
@@ -1499,10 +1499,10 @@ define <4 x double> @constrained_vector_pow_v4f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -192
; S390X-NEXT: .cfi_def_cfa_offset 352
-; S390X-NEXT: std %f8, 184(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f11, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 184(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f11, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -1534,10 +1534,10 @@ define <4 x double> @constrained_vector_pow_v4f64() #0 {
; S390X-NEXT: ldr %f2, %f11
; S390X-NEXT: ldr %f4, %f10
; S390X-NEXT: ldr %f6, %f9
-; S390X-NEXT: ld %f8, 184(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f11, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 184(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f11, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 304(%r15)
; S390X-NEXT: br %r14
;
@@ -1548,7 +1548,7 @@ define <4 x double> @constrained_vector_pow_v4f64() #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: larl %r1, .LCPI34_0
; SZ13-NEXT: ld %f0, 0(%r1)
@@ -1578,7 +1578,7 @@ define <4 x double> @constrained_vector_pow_v4f64() #0 {
; SZ13-NEXT: brasl %r14, pow at PLT
; SZ13-NEXT: vl %v1, 176(%r15), 3 # 16-byte Folded Reload
; SZ13-NEXT: vl %v24, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: # kill: def $f0d killed $f0d def $v0
; SZ13-NEXT: vmrhg %v26, %v0, %v1
; SZ13-NEXT: lmg %r14, %r15, 312(%r15)
@@ -1641,7 +1641,7 @@ define <2 x double> @constrained_vector_powi_v2f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -168
; S390X-NEXT: .cfi_def_cfa_offset 328
-; S390X-NEXT: std %f8, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: larl %r1, .LCPI36_0
; S390X-NEXT: ld %f0, 0(%r1)
@@ -1654,7 +1654,7 @@ define <2 x double> @constrained_vector_powi_v2f64() #0 {
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, __powidf2 at PLT
; S390X-NEXT: ldr %f2, %f8
-; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 280(%r15)
; S390X-NEXT: br %r14
;
@@ -1697,8 +1697,8 @@ define <3 x float> @constrained_vector_powi_v3f32() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: larl %r1, .LCPI37_0
@@ -1719,8 +1719,8 @@ define <3 x float> @constrained_vector_powi_v3f32() #0 {
; S390X-NEXT: brasl %r14, __powisf2 at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f8
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -1773,8 +1773,8 @@ define void @constrained_vector_powi_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: larl %r1, .LCPI38_0
@@ -1797,8 +1797,8 @@ define void @constrained_vector_powi_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 16(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f8, 0(%r13)
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 280(%r15)
; S390X-NEXT: br %r14
;
@@ -1853,9 +1853,9 @@ define <4 x double> @constrained_vector_powi_v4f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -1884,9 +1884,9 @@ define <4 x double> @constrained_vector_powi_v4f64() #0 {
; S390X-NEXT: ldr %f2, %f10
; S390X-NEXT: ldr %f4, %f9
; S390X-NEXT: ldr %f6, %f8
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -1981,7 +1981,7 @@ define <2 x double> @constrained_vector_sin_v2f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -168
; S390X-NEXT: .cfi_def_cfa_offset 328
-; S390X-NEXT: std %f8, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: larl %r1, .LCPI41_0
; S390X-NEXT: ld %f0, 0(%r1)
@@ -1992,7 +1992,7 @@ define <2 x double> @constrained_vector_sin_v2f64() #0 {
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, sin at PLT
; S390X-NEXT: ldr %f2, %f8
-; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 280(%r15)
; S390X-NEXT: br %r14
;
@@ -2032,8 +2032,8 @@ define <3 x float> @constrained_vector_sin_v3f32() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: larl %r1, .LCPI42_0
@@ -2051,8 +2051,8 @@ define <3 x float> @constrained_vector_sin_v3f32() #0 {
; S390X-NEXT: brasl %r14, sinf at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f8
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -2101,9 +2101,9 @@ define void @constrained_vector_sin_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -2121,9 +2121,9 @@ define void @constrained_vector_sin_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -2135,7 +2135,7 @@ define void @constrained_vector_sin_v3f64(ptr %a) #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: vl %v0, 0(%r2), 4
; SZ13-NEXT: ld %f8, 16(%r2)
@@ -2157,7 +2157,7 @@ define void @constrained_vector_sin_v3f64(ptr %a) #0 {
; SZ13-NEXT: brasl %r14, sin at PLT
; SZ13-NEXT: std %f0, 16(%r13)
; SZ13-NEXT: vl %v0, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vst %v0, 0(%r13), 4
; SZ13-NEXT: lmg %r13, %r15, 304(%r15)
; SZ13-NEXT: br %r14
@@ -2179,9 +2179,9 @@ define <4 x double> @constrained_vector_sin_v4f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -2206,9 +2206,9 @@ define <4 x double> @constrained_vector_sin_v4f64() #0 {
; S390X-NEXT: ldr %f2, %f10
; S390X-NEXT: ldr %f4, %f9
; S390X-NEXT: ldr %f6, %f8
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -2298,7 +2298,7 @@ define <2 x double> @constrained_vector_cos_v2f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -168
; S390X-NEXT: .cfi_def_cfa_offset 328
-; S390X-NEXT: std %f8, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: larl %r1, .LCPI46_0
; S390X-NEXT: ld %f0, 0(%r1)
@@ -2309,7 +2309,7 @@ define <2 x double> @constrained_vector_cos_v2f64() #0 {
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, cos at PLT
; S390X-NEXT: ldr %f2, %f8
-; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 280(%r15)
; S390X-NEXT: br %r14
;
@@ -2349,8 +2349,8 @@ define <3 x float> @constrained_vector_cos_v3f32() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: larl %r1, .LCPI47_0
@@ -2368,8 +2368,8 @@ define <3 x float> @constrained_vector_cos_v3f32() #0 {
; S390X-NEXT: brasl %r14, cosf at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f8
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -2418,9 +2418,9 @@ define void @constrained_vector_cos_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -2438,9 +2438,9 @@ define void @constrained_vector_cos_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -2452,7 +2452,7 @@ define void @constrained_vector_cos_v3f64(ptr %a) #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: vl %v0, 0(%r2), 4
; SZ13-NEXT: ld %f8, 16(%r2)
@@ -2474,7 +2474,7 @@ define void @constrained_vector_cos_v3f64(ptr %a) #0 {
; SZ13-NEXT: brasl %r14, cos at PLT
; SZ13-NEXT: std %f0, 16(%r13)
; SZ13-NEXT: vl %v0, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vst %v0, 0(%r13), 4
; SZ13-NEXT: lmg %r13, %r15, 304(%r15)
; SZ13-NEXT: br %r14
@@ -2496,9 +2496,9 @@ define <4 x double> @constrained_vector_cos_v4f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -2523,9 +2523,9 @@ define <4 x double> @constrained_vector_cos_v4f64() #0 {
; S390X-NEXT: ldr %f2, %f10
; S390X-NEXT: ldr %f4, %f9
; S390X-NEXT: ldr %f6, %f8
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -2615,7 +2615,7 @@ define <2 x double> @constrained_vector_exp_v2f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -168
; S390X-NEXT: .cfi_def_cfa_offset 328
-; S390X-NEXT: std %f8, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: larl %r1, .LCPI51_0
; S390X-NEXT: ld %f0, 0(%r1)
@@ -2626,7 +2626,7 @@ define <2 x double> @constrained_vector_exp_v2f64() #0 {
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, exp at PLT
; S390X-NEXT: ldr %f2, %f8
-; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 280(%r15)
; S390X-NEXT: br %r14
;
@@ -2666,8 +2666,8 @@ define <3 x float> @constrained_vector_exp_v3f32() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: larl %r1, .LCPI52_0
@@ -2685,8 +2685,8 @@ define <3 x float> @constrained_vector_exp_v3f32() #0 {
; S390X-NEXT: brasl %r14, expf at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f8
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -2735,9 +2735,9 @@ define void @constrained_vector_exp_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -2755,9 +2755,9 @@ define void @constrained_vector_exp_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -2769,7 +2769,7 @@ define void @constrained_vector_exp_v3f64(ptr %a) #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: vl %v0, 0(%r2), 4
; SZ13-NEXT: ld %f8, 16(%r2)
@@ -2791,7 +2791,7 @@ define void @constrained_vector_exp_v3f64(ptr %a) #0 {
; SZ13-NEXT: brasl %r14, exp at PLT
; SZ13-NEXT: std %f0, 16(%r13)
; SZ13-NEXT: vl %v0, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vst %v0, 0(%r13), 4
; SZ13-NEXT: lmg %r13, %r15, 304(%r15)
; SZ13-NEXT: br %r14
@@ -2813,9 +2813,9 @@ define <4 x double> @constrained_vector_exp_v4f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -2840,9 +2840,9 @@ define <4 x double> @constrained_vector_exp_v4f64() #0 {
; S390X-NEXT: ldr %f2, %f10
; S390X-NEXT: ldr %f4, %f9
; S390X-NEXT: ldr %f6, %f8
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -2932,7 +2932,7 @@ define <2 x double> @constrained_vector_exp2_v2f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -168
; S390X-NEXT: .cfi_def_cfa_offset 328
-; S390X-NEXT: std %f8, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: larl %r1, .LCPI56_0
; S390X-NEXT: ld %f0, 0(%r1)
@@ -2943,7 +2943,7 @@ define <2 x double> @constrained_vector_exp2_v2f64() #0 {
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, exp2 at PLT
; S390X-NEXT: ldr %f2, %f8
-; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 280(%r15)
; S390X-NEXT: br %r14
;
@@ -2983,8 +2983,8 @@ define <3 x float> @constrained_vector_exp2_v3f32() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: larl %r1, .LCPI57_0
@@ -3002,8 +3002,8 @@ define <3 x float> @constrained_vector_exp2_v3f32() #0 {
; S390X-NEXT: brasl %r14, exp2f at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f8
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -3052,9 +3052,9 @@ define void @constrained_vector_exp2_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -3072,9 +3072,9 @@ define void @constrained_vector_exp2_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -3086,7 +3086,7 @@ define void @constrained_vector_exp2_v3f64(ptr %a) #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: vl %v0, 0(%r2), 4
; SZ13-NEXT: ld %f8, 16(%r2)
@@ -3108,7 +3108,7 @@ define void @constrained_vector_exp2_v3f64(ptr %a) #0 {
; SZ13-NEXT: brasl %r14, exp2 at PLT
; SZ13-NEXT: std %f0, 16(%r13)
; SZ13-NEXT: vl %v0, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vst %v0, 0(%r13), 4
; SZ13-NEXT: lmg %r13, %r15, 304(%r15)
; SZ13-NEXT: br %r14
@@ -3130,9 +3130,9 @@ define <4 x double> @constrained_vector_exp2_v4f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -3157,9 +3157,9 @@ define <4 x double> @constrained_vector_exp2_v4f64() #0 {
; S390X-NEXT: ldr %f2, %f10
; S390X-NEXT: ldr %f4, %f9
; S390X-NEXT: ldr %f6, %f8
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -3249,7 +3249,7 @@ define <2 x double> @constrained_vector_log_v2f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -168
; S390X-NEXT: .cfi_def_cfa_offset 328
-; S390X-NEXT: std %f8, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: larl %r1, .LCPI61_0
; S390X-NEXT: ld %f0, 0(%r1)
@@ -3260,7 +3260,7 @@ define <2 x double> @constrained_vector_log_v2f64() #0 {
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, log at PLT
; S390X-NEXT: ldr %f2, %f8
-; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 280(%r15)
; S390X-NEXT: br %r14
;
@@ -3300,8 +3300,8 @@ define <3 x float> @constrained_vector_log_v3f32() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: larl %r1, .LCPI62_0
@@ -3319,8 +3319,8 @@ define <3 x float> @constrained_vector_log_v3f32() #0 {
; S390X-NEXT: brasl %r14, logf at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f8
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -3369,9 +3369,9 @@ define void @constrained_vector_log_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -3389,9 +3389,9 @@ define void @constrained_vector_log_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -3403,7 +3403,7 @@ define void @constrained_vector_log_v3f64(ptr %a) #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: vl %v0, 0(%r2), 4
; SZ13-NEXT: ld %f8, 16(%r2)
@@ -3425,7 +3425,7 @@ define void @constrained_vector_log_v3f64(ptr %a) #0 {
; SZ13-NEXT: brasl %r14, log at PLT
; SZ13-NEXT: std %f0, 16(%r13)
; SZ13-NEXT: vl %v0, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vst %v0, 0(%r13), 4
; SZ13-NEXT: lmg %r13, %r15, 304(%r15)
; SZ13-NEXT: br %r14
@@ -3447,9 +3447,9 @@ define <4 x double> @constrained_vector_log_v4f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -3474,9 +3474,9 @@ define <4 x double> @constrained_vector_log_v4f64() #0 {
; S390X-NEXT: ldr %f2, %f10
; S390X-NEXT: ldr %f4, %f9
; S390X-NEXT: ldr %f6, %f8
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -3566,7 +3566,7 @@ define <2 x double> @constrained_vector_log10_v2f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -168
; S390X-NEXT: .cfi_def_cfa_offset 328
-; S390X-NEXT: std %f8, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: larl %r1, .LCPI66_0
; S390X-NEXT: ld %f0, 0(%r1)
@@ -3577,7 +3577,7 @@ define <2 x double> @constrained_vector_log10_v2f64() #0 {
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, log10 at PLT
; S390X-NEXT: ldr %f2, %f8
-; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 280(%r15)
; S390X-NEXT: br %r14
;
@@ -3617,8 +3617,8 @@ define <3 x float> @constrained_vector_log10_v3f32() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: larl %r1, .LCPI67_0
@@ -3636,8 +3636,8 @@ define <3 x float> @constrained_vector_log10_v3f32() #0 {
; S390X-NEXT: brasl %r14, log10f at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f8
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -3686,9 +3686,9 @@ define void @constrained_vector_log10_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -3706,9 +3706,9 @@ define void @constrained_vector_log10_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -3720,7 +3720,7 @@ define void @constrained_vector_log10_v3f64(ptr %a) #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: vl %v0, 0(%r2), 4
; SZ13-NEXT: ld %f8, 16(%r2)
@@ -3742,7 +3742,7 @@ define void @constrained_vector_log10_v3f64(ptr %a) #0 {
; SZ13-NEXT: brasl %r14, log10 at PLT
; SZ13-NEXT: std %f0, 16(%r13)
; SZ13-NEXT: vl %v0, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vst %v0, 0(%r13), 4
; SZ13-NEXT: lmg %r13, %r15, 304(%r15)
; SZ13-NEXT: br %r14
@@ -3764,9 +3764,9 @@ define <4 x double> @constrained_vector_log10_v4f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -3791,9 +3791,9 @@ define <4 x double> @constrained_vector_log10_v4f64() #0 {
; S390X-NEXT: ldr %f2, %f10
; S390X-NEXT: ldr %f4, %f9
; S390X-NEXT: ldr %f6, %f8
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -3883,7 +3883,7 @@ define <2 x double> @constrained_vector_log2_v2f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -168
; S390X-NEXT: .cfi_def_cfa_offset 328
-; S390X-NEXT: std %f8, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: larl %r1, .LCPI71_0
; S390X-NEXT: ld %f0, 0(%r1)
@@ -3894,7 +3894,7 @@ define <2 x double> @constrained_vector_log2_v2f64() #0 {
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, log2 at PLT
; S390X-NEXT: ldr %f2, %f8
-; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 280(%r15)
; S390X-NEXT: br %r14
;
@@ -3934,8 +3934,8 @@ define <3 x float> @constrained_vector_log2_v3f32() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: larl %r1, .LCPI72_0
@@ -3953,8 +3953,8 @@ define <3 x float> @constrained_vector_log2_v3f32() #0 {
; S390X-NEXT: brasl %r14, log2f at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f8
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -4003,9 +4003,9 @@ define void @constrained_vector_log2_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -4023,9 +4023,9 @@ define void @constrained_vector_log2_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -4037,7 +4037,7 @@ define void @constrained_vector_log2_v3f64(ptr %a) #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: vl %v0, 0(%r2), 4
; SZ13-NEXT: ld %f8, 16(%r2)
@@ -4059,7 +4059,7 @@ define void @constrained_vector_log2_v3f64(ptr %a) #0 {
; SZ13-NEXT: brasl %r14, log2 at PLT
; SZ13-NEXT: std %f0, 16(%r13)
; SZ13-NEXT: vl %v0, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vst %v0, 0(%r13), 4
; SZ13-NEXT: lmg %r13, %r15, 304(%r15)
; SZ13-NEXT: br %r14
@@ -4081,9 +4081,9 @@ define <4 x double> @constrained_vector_log2_v4f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -4108,9 +4108,9 @@ define <4 x double> @constrained_vector_log2_v4f64() #0 {
; S390X-NEXT: ldr %f2, %f10
; S390X-NEXT: ldr %f4, %f9
; S390X-NEXT: ldr %f6, %f8
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -4334,8 +4334,8 @@ define <2 x double> @constrained_vector_nearbyint_v2f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: ld %f0, 8(%r2)
@@ -4345,8 +4345,8 @@ define <2 x double> @constrained_vector_nearbyint_v2f64(ptr %a) #0 {
; S390X-NEXT: ldr %f0, %f8
; S390X-NEXT: brasl %r14, nearbyint at PLT
; S390X-NEXT: ldr %f2, %f9
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -4372,9 +4372,9 @@ define <3 x float> @constrained_vector_nearbyint_v3f32(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -4393,9 +4393,9 @@ define <3 x float> @constrained_vector_nearbyint_v3f32(ptr %a) #0 {
; S390X-NEXT: brasl %r14, nearbyintf at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f10
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -4429,9 +4429,9 @@ define void @constrained_vector_nearbyint_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -4449,9 +4449,9 @@ define void @constrained_vector_nearbyint_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -4482,10 +4482,10 @@ define <4 x double> @constrained_vector_nearbyint_v4f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -192
; S390X-NEXT: .cfi_def_cfa_offset 352
-; S390X-NEXT: std %f8, 184(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f11, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 184(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f11, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -4507,10 +4507,10 @@ define <4 x double> @constrained_vector_nearbyint_v4f64(ptr %a) #0 {
; S390X-NEXT: ldr %f2, %f9
; S390X-NEXT: ldr %f4, %f10
; S390X-NEXT: ldr %f6, %f11
-; S390X-NEXT: ld %f8, 184(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f11, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 184(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f11, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 304(%r15)
; S390X-NEXT: br %r14
;
@@ -4577,7 +4577,7 @@ define <2 x double> @constrained_vector_maxnum_v2f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -168
; S390X-NEXT: .cfi_def_cfa_offset 328
-; S390X-NEXT: std %f8, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: larl %r1, .LCPI86_0
; S390X-NEXT: ld %f0, 0(%r1)
@@ -4592,7 +4592,7 @@ define <2 x double> @constrained_vector_maxnum_v2f64() #0 {
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, fmax at PLT
; S390X-NEXT: ldr %f2, %f8
-; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 280(%r15)
; S390X-NEXT: br %r14
;
@@ -4636,9 +4636,9 @@ define <3 x float> @constrained_vector_maxnum_v3f32() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -4662,9 +4662,9 @@ define <3 x float> @constrained_vector_maxnum_v3f32() #0 {
; S390X-NEXT: brasl %r14, fmaxf at PLT
; S390X-NEXT: ler %f2, %f10
; S390X-NEXT: ler %f4, %f9
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -4675,7 +4675,7 @@ define <3 x float> @constrained_vector_maxnum_v3f32() #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: larl %r1, .LCPI87_0
; SZ13-NEXT: lde %f0, 0(%r1)
@@ -4700,7 +4700,7 @@ define <3 x float> @constrained_vector_maxnum_v3f32() #0 {
; SZ13-NEXT: # kill: def $f0s killed $f0s def $v0
; SZ13-NEXT: vmrhf %v0, %v1, %v0
; SZ13-NEXT: vl %v1, 176(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vrepf %v1, %v1, 0
; SZ13-NEXT: vmrhg %v24, %v0, %v1
; SZ13-NEXT: lmg %r14, %r15, 312(%r15)
@@ -4722,9 +4722,9 @@ define void @constrained_vector_log10_maxnum_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -4748,9 +4748,9 @@ define void @constrained_vector_log10_maxnum_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -4762,7 +4762,7 @@ define void @constrained_vector_log10_maxnum_v3f64(ptr %a) #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: larl %r1, .LCPI88_0
; SZ13-NEXT: vl %v0, 0(%r2), 4
@@ -4790,7 +4790,7 @@ define void @constrained_vector_log10_maxnum_v3f64(ptr %a) #0 {
; SZ13-NEXT: brasl %r14, fmax at PLT
; SZ13-NEXT: std %f0, 16(%r13)
; SZ13-NEXT: vl %v0, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vst %v0, 0(%r13), 4
; SZ13-NEXT: lmg %r13, %r15, 304(%r15)
; SZ13-NEXT: br %r14
@@ -4812,9 +4812,9 @@ define <4 x double> @constrained_vector_maxnum_v4f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -4847,9 +4847,9 @@ define <4 x double> @constrained_vector_maxnum_v4f64() #0 {
; S390X-NEXT: ldr %f2, %f10
; S390X-NEXT: ldr %f4, %f9
; S390X-NEXT: ldr %f6, %f8
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -4951,7 +4951,7 @@ define <2 x double> @constrained_vector_minnum_v2f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -168
; S390X-NEXT: .cfi_def_cfa_offset 328
-; S390X-NEXT: std %f8, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: larl %r1, .LCPI91_0
; S390X-NEXT: ld %f0, 0(%r1)
@@ -4966,7 +4966,7 @@ define <2 x double> @constrained_vector_minnum_v2f64() #0 {
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, fmin at PLT
; S390X-NEXT: ldr %f2, %f8
-; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 280(%r15)
; S390X-NEXT: br %r14
;
@@ -5010,9 +5010,9 @@ define <3 x float> @constrained_vector_minnum_v3f32() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -5036,9 +5036,9 @@ define <3 x float> @constrained_vector_minnum_v3f32() #0 {
; S390X-NEXT: brasl %r14, fminf at PLT
; S390X-NEXT: ler %f2, %f10
; S390X-NEXT: ler %f4, %f9
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -5049,7 +5049,7 @@ define <3 x float> @constrained_vector_minnum_v3f32() #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: larl %r1, .LCPI92_0
; SZ13-NEXT: lde %f0, 0(%r1)
@@ -5074,7 +5074,7 @@ define <3 x float> @constrained_vector_minnum_v3f32() #0 {
; SZ13-NEXT: # kill: def $f0s killed $f0s def $v0
; SZ13-NEXT: vmrhf %v0, %v1, %v0
; SZ13-NEXT: vl %v1, 176(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vrepf %v1, %v1, 0
; SZ13-NEXT: vmrhg %v24, %v0, %v1
; SZ13-NEXT: lmg %r14, %r15, 312(%r15)
@@ -5096,10 +5096,10 @@ define void @constrained_vector_minnum_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -192
; S390X-NEXT: .cfi_def_cfa_offset 352
-; S390X-NEXT: std %f8, 184(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f11, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 184(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f11, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -5123,10 +5123,10 @@ define void @constrained_vector_minnum_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f10, 8(%r13)
; S390X-NEXT: std %f11, 16(%r13)
-; S390X-NEXT: ld %f8, 184(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f11, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 184(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f11, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -5138,8 +5138,8 @@ define void @constrained_vector_minnum_v3f64(ptr %a) #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -208
; SZ13-NEXT: .cfi_def_cfa_offset 368
-; SZ13-NEXT: std %f8, 200(%r15) # 8-byte Folded Spill
-; SZ13-NEXT: std %f9, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 200(%r15) # 8-byte Spill
+; SZ13-NEXT: std %f9, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: .cfi_offset %f9, -176
; SZ13-NEXT: larl %r1, .LCPI93_0
@@ -5167,8 +5167,8 @@ define void @constrained_vector_minnum_v3f64(ptr %a) #0 {
; SZ13-NEXT: brasl %r14, fmin at PLT
; SZ13-NEXT: std %f0, 16(%r13)
; SZ13-NEXT: vl %v0, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 200(%r15) # 8-byte Folded Reload
-; SZ13-NEXT: ld %f9, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 200(%r15) # 8-byte Reload
+; SZ13-NEXT: ld %f9, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vst %v0, 0(%r13), 4
; SZ13-NEXT: lmg %r13, %r15, 312(%r15)
; SZ13-NEXT: br %r14
@@ -5190,9 +5190,9 @@ define <4 x double> @constrained_vector_minnum_v4f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -5225,9 +5225,9 @@ define <4 x double> @constrained_vector_minnum_v4f64() #0 {
; S390X-NEXT: ldr %f2, %f10
; S390X-NEXT: ldr %f4, %f9
; S390X-NEXT: ldr %f6, %f8
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -5567,8 +5567,8 @@ define <2 x double> @constrained_vector_ceil_v2f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: ld %f0, 8(%r2)
@@ -5578,8 +5578,8 @@ define <2 x double> @constrained_vector_ceil_v2f64(ptr %a) #0 {
; S390X-NEXT: ldr %f0, %f8
; S390X-NEXT: brasl %r14, ceil at PLT
; S390X-NEXT: ldr %f2, %f9
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -5604,9 +5604,9 @@ define <3 x float> @constrained_vector_ceil_v3f32(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -5625,9 +5625,9 @@ define <3 x float> @constrained_vector_ceil_v3f32(ptr %a) #0 {
; S390X-NEXT: brasl %r14, ceilf at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f10
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -5660,9 +5660,9 @@ define void @constrained_vector_ceil_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -5680,9 +5680,9 @@ define void @constrained_vector_ceil_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -5740,8 +5740,8 @@ define <2 x double> @constrained_vector_floor_v2f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: ld %f0, 8(%r2)
@@ -5751,8 +5751,8 @@ define <2 x double> @constrained_vector_floor_v2f64(ptr %a) #0 {
; S390X-NEXT: ldr %f0, %f8
; S390X-NEXT: brasl %r14, floor at PLT
; S390X-NEXT: ldr %f2, %f9
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -5777,9 +5777,9 @@ define <3 x float> @constrained_vector_floor_v3f32(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -5798,9 +5798,9 @@ define <3 x float> @constrained_vector_floor_v3f32(ptr %a) #0 {
; S390X-NEXT: brasl %r14, floorf at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f10
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -5833,9 +5833,9 @@ define void @constrained_vector_floor_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -5853,9 +5853,9 @@ define void @constrained_vector_floor_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -5912,8 +5912,8 @@ define <2 x double> @constrained_vector_round_v2f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: ld %f0, 8(%r2)
@@ -5923,8 +5923,8 @@ define <2 x double> @constrained_vector_round_v2f64(ptr %a) #0 {
; S390X-NEXT: ldr %f0, %f8
; S390X-NEXT: brasl %r14, round at PLT
; S390X-NEXT: ldr %f2, %f9
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -5949,9 +5949,9 @@ define <3 x float> @constrained_vector_round_v3f32(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -5970,9 +5970,9 @@ define <3 x float> @constrained_vector_round_v3f32(ptr %a) #0 {
; S390X-NEXT: brasl %r14, roundf at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f10
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -6006,9 +6006,9 @@ define void @constrained_vector_round_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -6026,9 +6026,9 @@ define void @constrained_vector_round_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -6085,8 +6085,8 @@ define <2 x double> @constrained_vector_roundeven_v2f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: ld %f0, 8(%r2)
@@ -6096,8 +6096,8 @@ define <2 x double> @constrained_vector_roundeven_v2f64(ptr %a) #0 {
; S390X-NEXT: ldr %f0, %f8
; S390X-NEXT: brasl %r14, roundeven at PLT
; S390X-NEXT: ldr %f2, %f9
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -6122,9 +6122,9 @@ define <3 x float> @constrained_vector_roundeven_v3f32(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -6143,9 +6143,9 @@ define <3 x float> @constrained_vector_roundeven_v3f32(ptr %a) #0 {
; S390X-NEXT: brasl %r14, roundevenf at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f10
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -6178,9 +6178,9 @@ define void @constrained_vector_roundeven_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -6198,9 +6198,9 @@ define void @constrained_vector_roundeven_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -6257,8 +6257,8 @@ define <2 x double> @constrained_vector_trunc_v2f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: ld %f0, 8(%r2)
@@ -6268,8 +6268,8 @@ define <2 x double> @constrained_vector_trunc_v2f64(ptr %a) #0 {
; S390X-NEXT: ldr %f0, %f8
; S390X-NEXT: brasl %r14, trunc at PLT
; S390X-NEXT: ldr %f2, %f9
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -6294,9 +6294,9 @@ define <3 x float> @constrained_vector_trunc_v3f32(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -6315,9 +6315,9 @@ define <3 x float> @constrained_vector_trunc_v3f32(ptr %a) #0 {
; S390X-NEXT: brasl %r14, truncf at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f10
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -6350,9 +6350,9 @@ define void @constrained_vector_trunc_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -6370,9 +6370,9 @@ define void @constrained_vector_trunc_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -6438,7 +6438,7 @@ define <2 x double> @constrained_vector_tan_v2f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -168
; S390X-NEXT: .cfi_def_cfa_offset 328
-; S390X-NEXT: std %f8, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: larl %r1, .LCPI124_0
; S390X-NEXT: ld %f0, 0(%r1)
@@ -6449,7 +6449,7 @@ define <2 x double> @constrained_vector_tan_v2f64() #0 {
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, tan at PLT
; S390X-NEXT: ldr %f2, %f8
-; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 280(%r15)
; S390X-NEXT: br %r14
;
@@ -6489,8 +6489,8 @@ define <3 x float> @constrained_vector_tan_v3f32() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: larl %r1, .LCPI125_0
@@ -6508,8 +6508,8 @@ define <3 x float> @constrained_vector_tan_v3f32() #0 {
; S390X-NEXT: brasl %r14, tanf at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f8
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -6558,9 +6558,9 @@ define void @constrained_vector_tan_v3f64(ptr %a) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -6578,9 +6578,9 @@ define void @constrained_vector_tan_v3f64(ptr %a) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f10, 16(%r13)
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -6592,7 +6592,7 @@ define void @constrained_vector_tan_v3f64(ptr %a) #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -200
; SZ13-NEXT: .cfi_def_cfa_offset 360
-; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 192(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: vl %v0, 0(%r2), 4
; SZ13-NEXT: ld %f8, 16(%r2)
@@ -6614,7 +6614,7 @@ define void @constrained_vector_tan_v3f64(ptr %a) #0 {
; SZ13-NEXT: brasl %r14, tan at PLT
; SZ13-NEXT: std %f0, 16(%r13)
; SZ13-NEXT: vl %v0, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 192(%r15) # 8-byte Reload
; SZ13-NEXT: vst %v0, 0(%r13), 4
; SZ13-NEXT: lmg %r13, %r15, 304(%r15)
; SZ13-NEXT: br %r14
@@ -6636,9 +6636,9 @@ define <4 x double> @constrained_vector_tan_v4f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -6663,9 +6663,9 @@ define <4 x double> @constrained_vector_tan_v4f64() #0 {
; S390X-NEXT: ldr %f2, %f10
; S390X-NEXT: ldr %f4, %f9
; S390X-NEXT: ldr %f6, %f8
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
@@ -6760,7 +6760,7 @@ define <2 x double> @constrained_vector_atan2_v2f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -168
; S390X-NEXT: .cfi_def_cfa_offset 328
-; S390X-NEXT: std %f8, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: larl %r1, .LCPI129_0
; S390X-NEXT: ld %f0, 0(%r1)
@@ -6775,7 +6775,7 @@ define <2 x double> @constrained_vector_atan2_v2f64() #0 {
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, atan2 at PLT
; S390X-NEXT: ldr %f2, %f8
-; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 280(%r15)
; S390X-NEXT: br %r14
;
@@ -6820,8 +6820,8 @@ define <3 x float> @constrained_vector_atan2_v3f32() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -176
; S390X-NEXT: .cfi_def_cfa_offset 336
-; S390X-NEXT: std %f8, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: larl %r1, .LCPI130_0
@@ -6845,8 +6845,8 @@ define <3 x float> @constrained_vector_atan2_v3f32() #0 {
; S390X-NEXT: brasl %r14, atan2f at PLT
; S390X-NEXT: ler %f2, %f9
; S390X-NEXT: ler %f4, %f8
-; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 288(%r15)
; S390X-NEXT: br %r14
;
@@ -6902,11 +6902,11 @@ define void @constrained_vector_atan2_v3f64(ptr %a, ptr %b) #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -200
; S390X-NEXT: .cfi_def_cfa_offset 360
-; S390X-NEXT: std %f8, 192(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 184(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f11, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f12, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 192(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 184(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f11, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f12, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -6931,11 +6931,11 @@ define void @constrained_vector_atan2_v3f64(ptr %a, ptr %b) #0 {
; S390X-NEXT: std %f0, 0(%r13)
; S390X-NEXT: std %f9, 8(%r13)
; S390X-NEXT: std %f12, 16(%r13)
-; S390X-NEXT: ld %f8, 192(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 184(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f11, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f12, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 192(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 184(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f11, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f12, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r13, %r15, 304(%r15)
; S390X-NEXT: br %r14
;
@@ -6947,8 +6947,8 @@ define void @constrained_vector_atan2_v3f64(ptr %a, ptr %b) #0 {
; SZ13-NEXT: .cfi_offset %r15, -40
; SZ13-NEXT: aghi %r15, -224
; SZ13-NEXT: .cfi_def_cfa_offset 384
-; SZ13-NEXT: std %f8, 216(%r15) # 8-byte Folded Spill
-; SZ13-NEXT: std %f9, 208(%r15) # 8-byte Folded Spill
+; SZ13-NEXT: std %f8, 216(%r15) # 8-byte Spill
+; SZ13-NEXT: std %f9, 208(%r15) # 8-byte Spill
; SZ13-NEXT: .cfi_offset %f8, -168
; SZ13-NEXT: .cfi_offset %f9, -176
; SZ13-NEXT: vl %v0, 0(%r2), 4
@@ -6979,8 +6979,8 @@ define void @constrained_vector_atan2_v3f64(ptr %a, ptr %b) #0 {
; SZ13-NEXT: brasl %r14, atan2 at PLT
; SZ13-NEXT: std %f0, 16(%r13)
; SZ13-NEXT: vl %v0, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT: ld %f8, 216(%r15) # 8-byte Folded Reload
-; SZ13-NEXT: ld %f9, 208(%r15) # 8-byte Folded Reload
+; SZ13-NEXT: ld %f8, 216(%r15) # 8-byte Reload
+; SZ13-NEXT: ld %f9, 208(%r15) # 8-byte Reload
; SZ13-NEXT: vst %v0, 0(%r13), 4
; SZ13-NEXT: lmg %r13, %r15, 328(%r15)
; SZ13-NEXT: br %r14
@@ -7004,9 +7004,9 @@ define <4 x double> @constrained_vector_atan2_v4f64() #0 {
; S390X-NEXT: .cfi_offset %r15, -40
; S390X-NEXT: aghi %r15, -184
; S390X-NEXT: .cfi_def_cfa_offset 344
-; S390X-NEXT: std %f8, 176(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f9, 168(%r15) # 8-byte Folded Spill
-; S390X-NEXT: std %f10, 160(%r15) # 8-byte Folded Spill
+; S390X-NEXT: std %f8, 176(%r15) # 8-byte Spill
+; S390X-NEXT: std %f9, 168(%r15) # 8-byte Spill
+; S390X-NEXT: std %f10, 160(%r15) # 8-byte Spill
; S390X-NEXT: .cfi_offset %f8, -168
; S390X-NEXT: .cfi_offset %f9, -176
; S390X-NEXT: .cfi_offset %f10, -184
@@ -7039,9 +7039,9 @@ define <4 x double> @constrained_vector_atan2_v4f64() #0 {
; S390X-NEXT: ldr %f2, %f10
; S390X-NEXT: ldr %f4, %f9
; S390X-NEXT: ldr %f6, %f8
-; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Folded Reload
-; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Folded Reload
+; S390X-NEXT: ld %f8, 176(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f9, 168(%r15) # 8-byte Reload
+; S390X-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; S390X-NEXT: lmg %r14, %r15, 296(%r15)
; S390X-NEXT: br %r14
;
diff --git a/llvm/test/CodeGen/SystemZ/zos-prologue-epilog.ll b/llvm/test/CodeGen/SystemZ/zos-prologue-epilog.ll
index 40813a7bc18de..428d9bbe64c53 100644
--- a/llvm/test/CodeGen/SystemZ/zos-prologue-epilog.ll
+++ b/llvm/test/CodeGen/SystemZ/zos-prologue-epilog.ll
@@ -87,14 +87,14 @@ define void @func1(ptr %ptr) {
; CHECK-LABEL: func2
; CHECK64: stmg 6,7,1744(4)
; CHECK64: aghi 4,-320
-; CHECK64: std 15,{{[0-9]+}}(4) * 8-byte Folded Spill
-; CHECK64: std 14,{{[0-9]+}}(4) * 8-byte Folded Spill
-; CHECK64: std 13,{{[0-9]+}}(4) * 8-byte Folded Spill
-; CHECK64: std 12,{{[0-9]+}}(4) * 8-byte Folded Spill
-; CHECK64: std 11,{{[0-9]+}}(4) * 8-byte Folded Spill
-; CHECK64: std 10,{{[0-9]+}}(4) * 8-byte Folded Spill
-; CHECK64: std 9,{{[0-9]+}}(4) * 8-byte Folded Spill
-; CHECK64: std 8,{{[0-9]+}}(4) * 8-byte Folded Spill
+; CHECK64: std 15,{{[0-9]+}}(4) * 8-byte Spill
+; CHECK64: std 14,{{[0-9]+}}(4) * 8-byte Spill
+; CHECK64: std 13,{{[0-9]+}}(4) * 8-byte Spill
+; CHECK64: std 12,{{[0-9]+}}(4) * 8-byte Spill
+; CHECK64: std 11,{{[0-9]+}}(4) * 8-byte Spill
+; CHECK64: std 10,{{[0-9]+}}(4) * 8-byte Spill
+; CHECK64: std 9,{{[0-9]+}}(4) * 8-byte Spill
+; CHECK64: std 8,{{[0-9]+}}(4) * 8-byte Spill
; CHECK64: vst 23,{{[0-9]+}}(4),4 * 16-byte Folded Spill
; CHECK64: vst 22,{{[0-9]+}}(4),4 * 16-byte Folded Spill
; CHECK64: vst 21,{{[0-9]+}}(4),4 * 16-byte Folded Spill
@@ -104,14 +104,14 @@ define void @func1(ptr %ptr) {
; CHECK64: vst 17,{{[0-9]+}}(4),4 * 16-byte Folded Spill
; CHECK64: vst 16,{{[0-9]+}}(4),4 * 16-byte Folded Spill
-; CHECK64: ld 15,{{[0-9]+}}(4) * 8-byte Folded Reload
-; CHECK64: ld 14,{{[0-9]+}}(4) * 8-byte Folded Reload
-; CHECK64: ld 13,{{[0-9]+}}(4) * 8-byte Folded Reload
-; CHECK64: ld 12,{{[0-9]+}}(4) * 8-byte Folded Reload
-; CHECK64: ld 11,{{[0-9]+}}(4) * 8-byte Folded Reload
-; CHECK64: ld 10,{{[0-9]+}}(4) * 8-byte Folded Reload
-; CHECK64: ld 9,{{[0-9]+}}(4) * 8-byte Folded Reload
-; CHECK64: ld 8,{{[0-9]+}}(4) * 8-byte Folded Reload
+; CHECK64: ld 15,{{[0-9]+}}(4) * 8-byte Reload
+; CHECK64: ld 14,{{[0-9]+}}(4) * 8-byte Reload
+; CHECK64: ld 13,{{[0-9]+}}(4) * 8-byte Reload
+; CHECK64: ld 12,{{[0-9]+}}(4) * 8-byte Reload
+; CHECK64: ld 11,{{[0-9]+}}(4) * 8-byte Reload
+; CHECK64: ld 10,{{[0-9]+}}(4) * 8-byte Reload
+; CHECK64: ld 9,{{[0-9]+}}(4) * 8-byte Reload
+; CHECK64: ld 8,{{[0-9]+}}(4) * 8-byte Reload
; CHECK64: vl 23,{{[0-9]+}}(4),4 * 16-byte Folded Reload
; CHECK64: vl 22,{{[0-9]+}}(4),4 * 16-byte Folded Reload
; CHECK64: vl 21,{{[0-9]+}}(4),4 * 16-byte Folded Reload
More information about the llvm-commits
mailing list