[llvm] f541a50 - [SystemZ] Implement orderFrameObjects().

Jonas Paulsson via llvm-commits llvm-commits@lists.llvm.org
Thu Jan 27 14:11:16 PST 2022


Author: Jonas Paulsson
Date: 2022-01-27T16:09:19-06:00
New Revision: f541a5048a12b5fdd8a6ae1b6d1bd67366f00ecf

URL: https://github.com/llvm/llvm-project/commit/f541a5048a12b5fdd8a6ae1b6d1bd67366f00ecf
DIFF: https://github.com/llvm/llvm-project/commit/f541a5048a12b5fdd8a6ae1b6d1bd67366f00ecf.diff

LOG: [SystemZ] Implement orderFrameObjects().

By reordering the objects on the stack frame after looking at their users,
the displacement operands are utilized better, which means fewer Load
Address instructions are needed to access these objects.

This is important for very large functions, where otherwise a small change
could cause many more (or fewer) accesses to go out of range.
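
As a rough illustration of the ranges involved (made-up offsets, not taken
from the patch): a 12-bit displacement covers byte offsets 0-4095 from the
base register, while the 20-bit form covers -524288..524287. An instruction
that only exists in a 12-bit form (e.g. MVHI) therefore needs an extra
address-setup instruction such as LA/LAY as soon as its object sits above
offset 4095, which placing that object low on the frame avoids:

    // Sketch in C++; isUInt12/isInt20 are local helpers here, not LLVM APIs.
    #include <cstdint>

    static bool isUInt12(int64_t Disp) { return Disp >= 0 && Disp <= 4095; }
    static bool isInt20(int64_t Disp) {
      return Disp >= -524288 && Disp <= 524287;
    }

    // A 12-bit-only access at offset 4200 fails isUInt12() and needs an
    // LA/LAY first; the same access at offset 164 folds the offset
    // directly into the instruction.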

Note: this is not yet enabled for SystemZXPLINKFrameLowering, but should be.

Review: Ulrich Weigand

Differential Revision: https://reviews.llvm.org/D115690

Added: 
    llvm/test/CodeGen/SystemZ/frame-27.mir

Modified: 
    llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
    llvm/lib/Target/SystemZ/SystemZFrameLowering.h
    llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
    llvm/lib/Target/SystemZ/SystemZInstrInfo.h
    llvm/test/CodeGen/SystemZ/args-11.ll
    llvm/test/CodeGen/SystemZ/foldmemop-imm-01.ll
    llvm/test/CodeGen/SystemZ/foldmemop-imm-02.mir
    llvm/test/CodeGen/SystemZ/foldmemop-msc.mir
    llvm/test/CodeGen/SystemZ/int-conv-01.ll
    llvm/test/CodeGen/SystemZ/int-conv-02.ll
    llvm/test/CodeGen/SystemZ/int-conv-06.ll
    llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll
    llvm/test/DebugInfo/SystemZ/variable-loc.ll
    llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/systemz_generated_funcs.ll.generated.expected
    llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/systemz_generated_funcs.ll.nogenerated.expected

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp b/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
index ccc7d0737f531..610627e7e3f08 100644
--- a/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
@@ -80,6 +80,88 @@ MachineBasicBlock::iterator SystemZFrameLowering::eliminateCallFramePseudoInstr(
   }
 }
 
+namespace {
+struct SZFrameSortingObj {
+  bool IsValid = false;     // True if we care about this Object.
+  uint32_t ObjectIndex = 0; // Index of Object into MFI list.
+  uint64_t ObjectSize = 0;  // Size of Object in bytes.
+  uint32_t D12Count = 0;    // 12-bit displacement only.
+  uint32_t DPairCount = 0;  // 12 or 20 bit displacement.
+};
+typedef std::vector<SZFrameSortingObj> SZFrameObjVec;
+} // namespace
+
+// TODO: Move to base class.
+void SystemZELFFrameLowering::orderFrameObjects(
+    const MachineFunction &MF, SmallVectorImpl<int> &ObjectsToAllocate) const {
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
+  const SystemZInstrInfo *TII =
+      static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
+
+  // Make a vector of sorting objects to track all MFI objects and mark those
+  // to be sorted as valid.
+  if (ObjectsToAllocate.size() <= 1)
+    return;
+  SZFrameObjVec SortingObjects(MFI.getObjectIndexEnd());
+  for (auto &Obj : ObjectsToAllocate) {
+    SortingObjects[Obj].IsValid = true;
+    SortingObjects[Obj].ObjectIndex = Obj;
+    SortingObjects[Obj].ObjectSize = MFI.getObjectSize(Obj);
+  }
+
+  // Examine uses for each object and record short (12-bit) and "pair"
+  // displacement types.
+  for (auto &MBB : MF)
+    for (auto &MI : MBB) {
+      if (MI.isDebugInstr())
+        continue;
+      for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
+        const MachineOperand &MO = MI.getOperand(I);
+        if (!MO.isFI())
+          continue;
+        int Index = MO.getIndex();
+        if (Index >= 0 && Index < MFI.getObjectIndexEnd() &&
+            SortingObjects[Index].IsValid) {
+          if (TII->hasDisplacementPairInsn(MI.getOpcode()))
+            SortingObjects[Index].DPairCount++;
+          else if (!(MI.getDesc().TSFlags & SystemZII::Has20BitOffset))
+            SortingObjects[Index].D12Count++;
+        }
+      }
+    }
+
+  // Sort all objects for short/paired displacements, which should be
+  // sufficient as it seems like all frame objects typically are within the
+  // long displacement range.  Sorting works by computing the "density" as
+  // Count / ObjectSize. The comparisons of two such fractions are refactored
+  // by multiplying both sides with A.ObjectSize * B.ObjectSize, in order to
+  // eliminate the (fp) divisions.  A higher density object needs to go after
+  // in the list in order for it to end up lower on the stack.
+  auto CmpD12 = [](const SZFrameSortingObj &A, const SZFrameSortingObj &B) {
+    // Put all invalid and variable sized objects at the end.
+    if (!A.IsValid || !B.IsValid)
+      return A.IsValid;
+    if (!A.ObjectSize || !B.ObjectSize)
+      return A.ObjectSize > 0;
+    uint64_t ADensityCmp = A.D12Count * B.ObjectSize;
+    uint64_t BDensityCmp = B.D12Count * A.ObjectSize;
+    if (ADensityCmp != BDensityCmp)
+      return ADensityCmp < BDensityCmp;
+    return A.DPairCount * B.ObjectSize < B.DPairCount * A.ObjectSize;
+  };
+  std::stable_sort(SortingObjects.begin(), SortingObjects.end(), CmpD12);
+
+  // Now modify the original list to represent the final order that
+  // we want.
+  unsigned Idx = 0;
+  for (auto &Obj : SortingObjects) {
+    // All invalid items are sorted at the end, so it's safe to stop.
+    if (!Obj.IsValid)
+      break;
+    ObjectsToAllocate[Idx++] = Obj.ObjectIndex;
+  }
+}
+
 bool SystemZFrameLowering::hasReservedCallFrame(
     const MachineFunction &MF) const {
   // The ELF ABI requires us to allocate 160 bytes of stack space for the
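
The CmpD12 comparator above avoids floating-point math: the comparison of
the densities D12Count / ObjectSize is rewritten as an integer
cross-multiplication. A minimal standalone sketch of just that step (the
field names mirror the patch, the struct itself is made up for
illustration):

    #include <cstdint>

    struct Obj {
      uint64_t ObjectSize; // Size in bytes.
      uint32_t D12Count;   // Accesses limited to a 12-bit displacement.
    };

    // A.D12Count / A.ObjectSize < B.D12Count / B.ObjectSize
    //   <=>  A.D12Count * B.ObjectSize < B.D12Count * A.ObjectSize
    // for non-zero sizes, so no divisions are needed.
    static bool lessDense(const Obj &A, const Obj &B) {
      return uint64_t(A.D12Count) * B.ObjectSize <
             uint64_t(B.D12Count) * A.ObjectSize;
    }

    // Example: A = {8, 4} has density 0.5 and B = {4, 1} has density 0.25;
    // 4 * 4 = 16 > 1 * 8 = 8, so A is denser, sorts after B, and ends up
    // closer to the stack pointer.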

diff  --git a/llvm/lib/Target/SystemZ/SystemZFrameLowering.h b/llvm/lib/Target/SystemZ/SystemZFrameLowering.h
index 3a1af888d8f9f..2b3d7efed53b8 100644
--- a/llvm/lib/Target/SystemZ/SystemZFrameLowering.h
+++ b/llvm/lib/Target/SystemZ/SystemZFrameLowering.h
@@ -77,6 +77,9 @@ class SystemZELFFrameLowering : public SystemZFrameLowering {
   bool hasFP(const MachineFunction &MF) const override;
   StackOffset getFrameIndexReference(const MachineFunction &MF, int FI,
                                      Register &FrameReg) const override;
+  void
+  orderFrameObjects(const MachineFunction &MF,
+                    SmallVectorImpl<int> &ObjectsToAllocate) const override;
 
   // Return the byte offset from the incoming stack pointer of Reg's
   // ABI-defined save slot.  Return 0 if no slot is defined for Reg.  Adjust

diff  --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
index 6db9bf3056b7f..4b6aa60f5d55a 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -1652,6 +1652,13 @@ unsigned SystemZInstrInfo::getOpcodeForOffset(unsigned Opcode,
   return 0;
 }
 
+bool SystemZInstrInfo::hasDisplacementPairInsn(unsigned Opcode) const {
+  const MCInstrDesc &MCID = get(Opcode);
+  if (MCID.TSFlags & SystemZII::Has20BitOffset)
+    return SystemZ::getDisp12Opcode(Opcode) >= 0;
+  return SystemZ::getDisp20Opcode(Opcode) >= 0;
+}
+
 unsigned SystemZInstrInfo::getLoadAndTest(unsigned Opcode) const {
   switch (Opcode) {
   case SystemZ::L:      return SystemZ::LT;
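
hasDisplacementPairInsn() is what feeds DPairCount in orderFrameObjects():
it returns true when an opcode exists in both a 12-bit and a 20-bit
displacement form (L and LY are one such pair). A simplified sketch of the
resulting classification, assuming the two flags are derived from the
opcode as in the loop above (this is not the committed code):

    // How each frame-index access is bucketed for the density sort.
    enum class DispClass { PairD12D20, D12Only, LongOnly };

    static DispClass classify(bool HasPairOpcode, bool Has20BitOffset) {
      if (HasPairOpcode)
        return DispClass::PairD12D20; // Counted in DPairCount.
      if (!Has20BitOffset)
        return DispClass::D12Only;    // Counted in D12Count.
      return DispClass::LongOnly;     // 20-bit displacement only; not counted.
    }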

diff  --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.h b/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
index 396f56c7f59c0..9e5b2729a7072 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
@@ -312,6 +312,9 @@ class SystemZInstrInfo : public SystemZGenInstrInfo {
   // exists.
   unsigned getOpcodeForOffset(unsigned Opcode, int64_t Offset) const;
 
+  // Return true if Opcode has a mapping in 12 <-> 20 bit displacements.
+  bool hasDisplacementPairInsn(unsigned Opcode) const;
+
   // If Opcode is a load instruction that has a LOAD AND TEST form,
   // return the opcode for the testing form, otherwise return 0.
   unsigned getLoadAndTest(unsigned Opcode) const;

diff  --git a/llvm/test/CodeGen/SystemZ/args-11.ll b/llvm/test/CodeGen/SystemZ/args-11.ll
index b355f9d6da15c..f3bd51d23787f 100644
--- a/llvm/test/CodeGen/SystemZ/args-11.ll
+++ b/llvm/test/CodeGen/SystemZ/args-11.ll
@@ -13,12 +13,12 @@ define i32 @fn2() {
 ; CHECK-NEXT:    .cfi_offset %r15, -40
 ; CHECK-NEXT:    aghi %r15, -184
 ; CHECK-NEXT:    .cfi_def_cfa_offset 344
-; CHECK-NEXT:    mvhi 180(%r15), -1
+; CHECK-NEXT:    mvhi 164(%r15), -1
+; CHECK-NEXT:    mvghi 176(%r15), 0
+; CHECK-NEXT:    la %r2, 168(%r15)
 ; CHECK-NEXT:    mvghi 168(%r15), 0
-; CHECK-NEXT:    la %r2, 160(%r15)
-; CHECK-NEXT:    mvghi 160(%r15), 0
 ; CHECK-NEXT:    brasl %r14, fn1@PLT
-; CHECK-NEXT:    l %r2, 180(%r15)
+; CHECK-NEXT:    l %r2, 164(%r15)
 ; CHECK-NEXT:    lmg %r14, %r15, 296(%r15)
 ; CHECK-NEXT:    br %r14
   %1 = alloca i32
@@ -37,13 +37,13 @@ define i32 @fn4() {
 ; CHECK-NEXT:    .cfi_offset %r15, -40
 ; CHECK-NEXT:    aghi %r15, -192
 ; CHECK-NEXT:    .cfi_def_cfa_offset 352
-; CHECK-NEXT:    mvhi 188(%r15), -1
+; CHECK-NEXT:    mvhi 164(%r15), -1
+; CHECK-NEXT:    mvghi 184(%r15), 0
 ; CHECK-NEXT:    mvghi 176(%r15), 0
+; CHECK-NEXT:    la %r2, 168(%r15)
 ; CHECK-NEXT:    mvghi 168(%r15), 0
-; CHECK-NEXT:    la %r2, 160(%r15)
-; CHECK-NEXT:    mvghi 160(%r15), 0
 ; CHECK-NEXT:    brasl %r14, fn3@PLT
-; CHECK-NEXT:    l %r2, 188(%r15)
+; CHECK-NEXT:    l %r2, 164(%r15)
 ; CHECK-NEXT:    lmg %r14, %r15, 304(%r15)
 ; CHECK-NEXT:    br %r14
   %1 = alloca i32

diff  --git a/llvm/test/CodeGen/SystemZ/foldmemop-imm-01.ll b/llvm/test/CodeGen/SystemZ/foldmemop-imm-01.ll
index 08a32a39519bd..b799f33056445 100644
--- a/llvm/test/CodeGen/SystemZ/foldmemop-imm-01.ll
+++ b/llvm/test/CodeGen/SystemZ/foldmemop-imm-01.ll
@@ -5,10 +5,10 @@
 
 define i32 @fun0(i32 *%src, i32 %arg) nounwind {
 ; CHECK-LABEL: fun0:
-; CHECK: 	mvhi	164(%r15), 0            # 4-byte Folded Spill
-; CHECK:	mvc	164(4,%r15), 0(%r2)     # 4-byte Folded Spill
+; CHECK: 	mvhi	160(%r15), 0            # 4-byte Folded Spill
+; CHECK:	mvc	160(4,%r15), 0(%r2)     # 4-byte Folded Spill
 ; CHECK-LABEL: .LBB0_2:
-; CHECK:	chsi	164(%r15), 2            # 4-byte Folded Reload
+; CHECK:	chsi	160(%r15), 2            # 4-byte Folded Reload
 
 entry:
   %cmp  = icmp eq i32 %arg, 0
@@ -31,10 +31,10 @@ exit:
 
 define i64 @fun1(i64 *%src, i64 %arg) nounwind {
 ; CHECK-LABEL: fun1:
-; CHECK: 	mvghi	168(%r15), 0            # 8-byte Folded Spill
-; CHECK:	mvc	168(8,%r15), 0(%r2)     # 8-byte Folded Spill
+; CHECK: 	mvghi	160(%r15), 0            # 8-byte Folded Spill
+; CHECK:	mvc	160(8,%r15), 0(%r2)     # 8-byte Folded Spill
 ; CHECK-LABEL: .LBB1_2:
-; CHECK:	cghsi	168(%r15), 2            # 8-byte Folded Reload
+; CHECK:	cghsi	160(%r15), 2            # 8-byte Folded Reload
 entry:
   %cmp  = icmp eq i64 %arg, 0
   br i1 %cmp, label %cond, label %exit

diff  --git a/llvm/test/CodeGen/SystemZ/foldmemop-imm-02.mir b/llvm/test/CodeGen/SystemZ/foldmemop-imm-02.mir
index 8dbb88bacb628..92f176db0ae64 100644
--- a/llvm/test/CodeGen/SystemZ/foldmemop-imm-02.mir
+++ b/llvm/test/CodeGen/SystemZ/foldmemop-imm-02.mir
@@ -12,10 +12,10 @@
 
 
 # CHECK-LABEL: fun0:
-# CHECK: 	mvhi	164(%r15), 0            # 4-byte Folded Spill
-# CHECK:	mvc	164(4,%r15), 0(%r2)     # 4-byte Folded Spill
+# CHECK: 	mvhi	160(%r15), 0            # 4-byte Folded Spill
+# CHECK:	mvc	160(4,%r15), 0(%r2)     # 4-byte Folded Spill
 # CHECK-LABEL: .LBB0_2:
-# CHECK:	clfhsi	164(%r15), 2            # 4-byte Folded Reload
+# CHECK:	clfhsi	160(%r15), 2            # 4-byte Folded Reload
 ---
 name:            fun0
 alignment:       16
@@ -66,10 +66,10 @@ body:             |
 
 
 # CHECK-LABEL: fun1:
-# CHECK: 	mvghi	168(%r15), 0            # 8-byte Folded Spill
-# CHECK:	mvc	168(8,%r15), 0(%r2)     # 8-byte Folded Spill
+# CHECK: 	mvghi	160(%r15), 0            # 8-byte Folded Spill
+# CHECK:	mvc	160(8,%r15), 0(%r2)     # 8-byte Folded Spill
 # CHECK-LABEL: .LBB1_2:
-# CHECK:	clghsi	168(%r15), 2            # 8-byte Folded Reload
+# CHECK:	clghsi	160(%r15), 2            # 8-byte Folded Reload
 ---
 name:            fun1
 alignment:       16
@@ -122,10 +122,10 @@ body:             |
 # 17-bit immediate can not be folded
 
 # CHECK-LABEL: fun2:
-# CHECK: 	mvhi	164(%r15), 0            # 4-byte Folded Spill
-# CHECK:	mvc	164(4,%r15), 0(%r2)     # 4-byte Folded Spill
+# CHECK: 	mvhi	160(%r15), 0            # 4-byte Folded Spill
+# CHECK:	mvc	160(4,%r15), 0(%r2)     # 4-byte Folded Spill
 # CHECK-LABEL: .LBB2_2:
-# CHECK:        l       %r0, 164(%r15)          # 4-byte Folded Reload
+# CHECK:        l       %r0, 160(%r15)          # 4-byte Folded Reload
 # CHECK:        clfi    %r0, 65536
 ---
 name:            fun2
@@ -179,10 +179,10 @@ body:             |
 # 17-bit immediate can not be folded
 
 # CHECK-LABEL: fun3:
-# CHECK: 	mvghi	168(%r15), 0            # 8-byte Folded Spill
-# CHECK:	mvc	168(8,%r15), 0(%r2)     # 8-byte Folded Spill
+# CHECK: 	mvghi	160(%r15), 0            # 8-byte Folded Spill
+# CHECK:	mvc	160(8,%r15), 0(%r2)     # 8-byte Folded Spill
 # CHECK-LABEL: .LBB3_2:
-# CHECK:        lg      %r0, 168(%r15)          # 8-byte Folded Reload
+# CHECK:        lg      %r0, 160(%r15)          # 8-byte Folded Reload
 # CHECK:        clgfi   %r0, 65536
 ---
 name:            fun3

diff  --git a/llvm/test/CodeGen/SystemZ/foldmemop-msc.mir b/llvm/test/CodeGen/SystemZ/foldmemop-msc.mir
index b7efb2d193f69..8d67e6cfef1b3 100644
--- a/llvm/test/CodeGen/SystemZ/foldmemop-msc.mir
+++ b/llvm/test/CodeGen/SystemZ/foldmemop-msc.mir
@@ -13,7 +13,7 @@
 
 # CHECK-LABEL: fun0:
 # CHECK-LABEL: .LBB0_2:
-# CHECK:	msc     %r0, 164(%r15)                  # 4-byte Folded Reload
+# CHECK:	msc     %r0, 160(%r15)                  # 4-byte Folded Reload
 ---
 name:            fun0
 alignment:       16
@@ -65,7 +65,7 @@ body:             |
 
 # CHECK-LABEL: fun1:
 # CHECK-LABEL: .LBB1_2:
-# CHECK:	msc     %r0, 164(%r15)                  # 4-byte Folded Reload
+# CHECK:	msc     %r0, 160(%r15)                  # 4-byte Folded Reload
 ---
 name:            fun1
 alignment:       16
@@ -117,7 +117,7 @@ body:             |
 
 # CHECK-LABEL: fun2:
 # CHECK-LABEL: .LBB2_2:
-# CHECK:	msgc    %r0, 168(%r15)                  # 8-byte Folded Reload
+# CHECK:	msgc    %r0, 160(%r15)                  # 8-byte Folded Reload
 ---
 name:            fun2
 alignment:       16
@@ -169,7 +169,7 @@ body:             |
 
 # CHECK-LABEL: fun3:
 # CHECK-LABEL: .LBB3_2:
-# CHECK:	msgc    %r0, 168(%r15)                  # 8-byte Folded Reload
+# CHECK:	msgc    %r0, 160(%r15)                  # 8-byte Folded Reload
 ---
 name:            fun3
 alignment:       16

diff  --git a/llvm/test/CodeGen/SystemZ/frame-27.mir b/llvm/test/CodeGen/SystemZ/frame-27.mir
new file mode 100644
index 0000000000000..4cb0206e7210c
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/frame-27.mir
@@ -0,0 +1,203 @@
+# RUN: llc -mtriple=s390x-linux-gnu -start-before=prologepilog %s -o - -mcpu=z14 \
+# RUN:   -debug-only=prologepilog -print-after=prologepilog -verify-machineinstrs 2>&1 \
+# RUN:   | FileCheck %s
+# REQUIRES: asserts
+#
+# Test that stack objects are ordered in a good way with respect to the
+# displacement operands of users.
+
+--- |
+  define void @f1() { ret void }
+  define void @f2() { ret void }
+  define void @f3() { ret void }
+  define void @f4() { ret void }
+  define void @f5() { ret void }
+  define void @f6() { ret void }
+
+...
+
+### Test that %stack.0 is placed close to its D12 user.
+# CHECK:      alloc FI(1) at SP[-4255]
+# CHECK-NEXT: alloc FI(0) at SP[-4271]
+# CHECK-NEXT: alloc FI(2) at SP[-4280]
+# CHECK-NEXT: alloc FI(3) at SP[-4288]
+# CHECK-NEXT: # *** IR Dump After Prologue/Epilogue Insertion & Frame Finalization
+# CHECK-NEXT: # Machine code for function f1: IsSSA, NoPHIs, TracksLiveness, NoVRegs
+# CHECK-NOT:  LAY
+# CHECK:      VL32
+---
+name:            f1
+tracksRegLiveness: true
+stack:
+  - { id: 0, size: 16 }
+  - { id: 1, size: 4095 }
+machineFunctionInfo: {}
+body:             |
+  bb.0:
+    renamable $f0s = VL32 %stack.0, 0, $noreg
+    Return
+
+...
+
+### Test that %stack.1 is placed close to its D12 user.
+# CHECK:      alloc FI(0) at SP[-176]
+# CHECK-NEXT: alloc FI(1) at SP[-4271]
+# CHECK-NEXT: alloc FI(2) at SP[-4280]
+# CHECK-NEXT: alloc FI(3) at SP[-4288]
+# CHECK-NEXT: # *** IR Dump After Prologue/Epilogue Insertion & Frame Finalization
+# CHECK-NEXT: # Machine code for function f2: IsSSA, NoPHIs, TracksLiveness, NoVRegs
+# CHECK-NOT:  LAY
+# CHECK:      VL32
+---
+name:            f2
+tracksRegLiveness: true
+stack:
+  - { id: 0, size: 16 }
+  - { id: 1, size: 4095 }
+machineFunctionInfo: {}
+body:             |
+  bb.0:
+    renamable $f0s = VL32 %stack.1, 3916, $noreg
+    Return
+
+...
+
+### Swap the order of the objects so that both accesses are in range.
+# CHECK:      alloc FI(1) at SP[-8350]
+# CHECK-NEXT: alloc FI(0) at SP[-12445]
+# CHECK-NEXT: alloc FI(2) at SP[-12456]
+# CHECK-NEXT: alloc FI(3) at SP[-12464]
+# CHECK-NEXT: # *** IR Dump After Prologue/Epilogue Insertion & Frame Finalization
+# CHECK-NEXT: # Machine code for function f3: IsSSA, NoPHIs, TracksLiveness, NoVRegs
+# CHECK-NOT:  LAY
+# CHECK:      VL32
+# CHECK-NOT:  LAY
+# CHECK:      LEY
+---
+name:            f3
+tracksRegLiveness: true
+stack:
+  - { id: 0, size: 4095 }
+  - { id: 1, size: 8190 }
+machineFunctionInfo: {}
+body:             |
+  bb.0:
+    renamable $f0s = VL32 %stack.0, 0, $noreg
+    renamable $f0s = LE %stack.1, 0, $noreg
+    Return
+
+...
+
+### Reorder the objects so that all accesses are in range.
+# CHECK:      alloc FI(0) at SP[-8350]
+# CHECK-NEXT: alloc FI(2) at SP[-16540]
+# CHECK-NEXT: alloc FI(3) at SP[-24730]
+# CHECK-NEXT: alloc FI(1) at SP[-26777]
+# CHECK-NEXT: alloc FI(4) at SP[-28824]
+# CHECK-NEXT: alloc FI(5) at SP[-28832]
+# CHECK-NEXT: alloc FI(6) at SP[-28840]
+# CHECK-NEXT: # *** IR Dump After Prologue/Epilogue Insertion & Frame Finalization
+# CHECK-NEXT: # Machine code for function f4: IsSSA, NoPHIs, TracksLiveness, NoVRegs
+# CHECK-NOT:  LAY
+# CHECK:      LEY
+# CHECK-NEXT: VL32
+# CHECK-NEXT: LEY
+# CHECK-NEXT: LEY
+# CHECK-NEXT: VL32
+---
+name:            f4
+tracksRegLiveness: true
+stack:
+  - { id: 0, size: 8190 }
+  - { id: 1, size: 2047 }
+  - { id: 2, size: 8190 }
+  - { id: 3, size: 8190 }
+  - { id: 4, size: 2047 }
+machineFunctionInfo: {}
+body:             |
+  bb.0:
+    renamable $f2s = LE %stack.0, 0, $noreg
+    renamable $f0s = VL32 %stack.1, 0, $noreg
+    renamable $f3s = LEY %stack.2, 0, $noreg
+    renamable $f4s = LE %stack.3, 0, $noreg
+    renamable $f1s = VL32 %stack.4, 0, $noreg
+    Return
+
+...
+
+### Reorder the objects so that the VL32 object is in range and the LYs are
+### shortened to Ls (STOC cannot be shortened).
+# CHECK:      alloc FI(0) at SP[-8350]
+# CHECK-NEXT: alloc FI(1) at SP[-16540]
+# CHECK-NEXT: alloc FI(2) at SP[-24730]
+# CHECK-NEXT: alloc FI(3) at SP[-26777]
+# CHECK-NEXT: alloc FI(4) at SP[-26792]
+# CHECK-NEXT: alloc FI(5) at SP[-26800]
+# CHECK-NEXT: # *** IR Dump After Prologue/Epilogue Insertion & Frame Finalization
+# CHECK-NEXT: # Machine code for function f5: IsSSA, NoPHIs, TracksLiveness, NoVRegs
+# CHECK-NOT:  LAY
+# CHECK:      $r1l = L $r15
+# CHECK-NEXT: $r1l = L $r15
+# CHECK-NEXT: IMPLICIT_DEF
+# CHECK-NEXT: STOC
+# CHECK-NEXT: STOC
+# CHECK-NEXT: VL32
+---
+name:            f5
+tracksRegLiveness: true
+stack:
+  - { id: 0, size: 8190 }
+  - { id: 1, size: 8190 }
+  - { id: 2, size: 8190 }
+  - { id: 3, size: 2047 }
+machineFunctionInfo: {}
+body:             |
+  bb.0:
+    $r1l = LY %stack.2, 0, $noreg
+    $r1l = LY %stack.2, 0, $noreg
+    $cc = IMPLICIT_DEF
+    STOC $r1l, %stack.0, 0, 14, 8, implicit $cc
+    STOC $r1l, %stack.1, 0, 14, 8, implicit $cc
+    renamable $f3s = VL32 %stack.3, 0, $noreg
+    Return
+
+...
+
+### Test handling of a variable sized object.
+# CHECK:      alloc FI(1) at SP[-476]
+# CHECK-NEXT: alloc FI(0) at SP[-776]
+# CHECK-NEXT: alloc FI(2) at SP[-776]
+# CHECK-NEXT: # *** IR Dump After Prologue/Epilogue Insertion & Frame Finalization
+# CHECK-NEXT: # Machine code for function f6: IsSSA, NoPHIs, TracksLiveness, NoVRegs
+
+# CHECK:  $r15d = AGHI $r15d(tied-def 0), -776, implicit-def dead $cc
+# CHECK:  $r11d = LGR $r15d
+# CHECK:  renamable $r2d = ADJDYNALLOC renamable $r1d, 0, $noreg
+# CHECK:  VST64 renamable $f0d, $r11d, 160, $noreg
+# CHECK:  VST32 renamable $f1s, $r11d, 460, $noreg
+# CHECK:  VST32 killed renamable $f0s, killed renamable $r2d, 0, $noreg
+---
+name:            f6
+tracksRegLiveness: true
+stack:
+  - { id: 0, size: 300 }
+  - { id: 1, size: 316 }
+  - { id: 2, type: variable-sized }
+machineFunctionInfo: {}
+body:             |
+  bb.0 (%ir-block.0):
+    liveins: $f0d, $f0s, $f1s, $r2l
+
+    renamable $r2l = KILL $r2l, implicit-def $r2d
+    renamable $r1d = RISBGN undef renamable $r1d, killed renamable $r2d, 30, 189, 2
+    renamable $r0d = nuw LA killed renamable $r1d, 7, $noreg
+    renamable $r0d = RISBGN undef renamable $r0d, killed renamable $r0d, 29, 188, 0
+    renamable $r1d = SGRK $r15d, killed renamable $r0d, implicit-def dead $cc
+    renamable $r2d = ADJDYNALLOC renamable $r1d, 0, $noreg
+    $r15d = COPY killed renamable $r1d
+    VST64 renamable $f0d, %stack.0, 0, $noreg
+    VST32 renamable $f1s, %stack.1, 0, $noreg
+    VST32 killed renamable $f0s, killed renamable $r2d, 0, $noreg
+    Return
+
+...

diff  --git a/llvm/test/CodeGen/SystemZ/int-conv-01.ll b/llvm/test/CodeGen/SystemZ/int-conv-01.ll
index 7841dbe4336e8..4e6aed7e6f9f7 100644
--- a/llvm/test/CodeGen/SystemZ/int-conv-01.ll
+++ b/llvm/test/CodeGen/SystemZ/int-conv-01.ll
@@ -108,7 +108,7 @@ define i32 @f9(i64 %src, i64 %index) {
 ; to use LB if possible.
 define void @f10(i32 *%ptr) {
 ; CHECK-LABEL: f10:
-; CHECK: lb {{%r[0-9]+}}, 16{{[37]}}(%r15)
+; CHECK: lb {{%r[0-9]+}}, 191(%r15)
 ; CHECK: br %r14
   %val0 = load volatile i32, i32 *%ptr
   %val1 = load volatile i32, i32 *%ptr

diff  --git a/llvm/test/CodeGen/SystemZ/int-conv-02.ll b/llvm/test/CodeGen/SystemZ/int-conv-02.ll
index 292728bdb167e..8e5ee8c51f3e1 100644
--- a/llvm/test/CodeGen/SystemZ/int-conv-02.ll
+++ b/llvm/test/CodeGen/SystemZ/int-conv-02.ll
@@ -118,7 +118,7 @@ define i32 @f10(i64 %src, i64 %index) {
 ; to use LLC if possible.
 define void @f11(i32 *%ptr) {
 ; CHECK-LABEL: f11:
-; CHECK: llc {{%r[0-9]+}}, 16{{[37]}}(%r15)
+; CHECK: llc {{%r[0-9]+}}, 187(%r15)
 ; CHECK: br %r14
   %val0 = load volatile i32, i32 *%ptr
   %val1 = load volatile i32, i32 *%ptr

diff  --git a/llvm/test/CodeGen/SystemZ/int-conv-06.ll b/llvm/test/CodeGen/SystemZ/int-conv-06.ll
index a36bcd494ce05..908ac00bc9e45 100644
--- a/llvm/test/CodeGen/SystemZ/int-conv-06.ll
+++ b/llvm/test/CodeGen/SystemZ/int-conv-06.ll
@@ -118,7 +118,7 @@ define i32 @f10(i64 %src, i64 %index) {
 ; to use LLH if possible.
 define void @f11(i32 *%ptr) {
 ; CHECK-LABEL: f11:
-; CHECK: llh {{%r[0-9]+}}, 16{{[26]}}(%r15)
+; CHECK: llh {{%r[0-9]+}}, 186(%r15)
 ; CHECK: br %r14
   %val0 = load volatile i32, i32 *%ptr
   %val1 = load volatile i32, i32 *%ptr

diff  --git a/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll
index 8752e6b85eb43..fcaca553f1e12 100644
--- a/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll
@@ -422,26 +422,26 @@ define void @constrained_vector_frem_v3f64(<3 x double>* %a) #0 {
 ; SZ13-NEXT:    ld %f8, 16(%r2)
 ; SZ13-NEXT:    vgmg %v0, 2, 11
 ; SZ13-NEXT:    lgr %r13, %r2
-; SZ13-NEXT:    vst %v2, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v2, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    # kill: def $f2d killed $f2d killed $v2
 ; SZ13-NEXT:    brasl %r14, fmod at PLT
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
-; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    vrepg %v2, %v0, 1
 ; SZ13-NEXT:    vgmg %v0, 1, 1
 ; SZ13-NEXT:    # kill: def $f2d killed $f2d killed $v2
 ; SZ13-NEXT:    brasl %r14, fmod at PLT
-; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v1, %v0
 ; SZ13-NEXT:    larl %r1, .LCPI8_0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    ldr %f2, %f8
 ; SZ13-NEXT:    brasl %r14, fmod at PLT
 ; SZ13-NEXT:    std %f0, 16(%r13)
-; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 192(%r15) # 8-byte Folded Reload
 ; SZ13-NEXT:    vst %v0, 0(%r13), 4
 ; SZ13-NEXT:    lmg %r13, %r15, 304(%r15)
@@ -522,26 +522,26 @@ define <4 x double> @constrained_vector_frem_v4f64() #0 {
 ; SZ13-NEXT:    ldr %f2, %f8
 ; SZ13-NEXT:    brasl %r14, fmod at PLT
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    vgmg %v0, 2, 11
 ; SZ13-NEXT:    ldr %f2, %f8
 ; SZ13-NEXT:    brasl %r14, fmod at PLT
-; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v0, %v1
 ; SZ13-NEXT:    larl %r1, .LCPI9_1
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    ldr %f2, %f8
 ; SZ13-NEXT:    brasl %r14, fmod at PLT
 ; SZ13-NEXT:    larl %r1, .LCPI9_2
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    ldr %f2, %f8
 ; SZ13-NEXT:    brasl %r14, fmod at PLT
-; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT:    vl %v24, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v24, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 192(%r15) # 8-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v26, %v0, %v1
@@ -1465,25 +1465,25 @@ define void @constrained_vector_pow_v3f64(<3 x double>* %a) #0 {
 ; SZ13-NEXT:    ld %f8, 16(%r2)
 ; SZ13-NEXT:    ldr %f2, %f9
 ; SZ13-NEXT:    lgr %r13, %r2
-; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, pow at PLT
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
-; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ldr %f2, %f9
 ; SZ13-NEXT:    vrepg %v0, %v0, 1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, pow at PLT
-; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v1, %v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldr %f0, %f8
 ; SZ13-NEXT:    ldr %f2, %f9
 ; SZ13-NEXT:    brasl %r14, pow at PLT
 ; SZ13-NEXT:    std %f0, 16(%r13)
-; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 200(%r15) # 8-byte Folded Reload
 ; SZ13-NEXT:    ld %f9, 192(%r15) # 8-byte Folded Reload
 ; SZ13-NEXT:    vst %v0, 0(%r13), 4
@@ -1567,26 +1567,26 @@ define <4 x double> @constrained_vector_pow_v4f64() #0 {
 ; SZ13-NEXT:    brasl %r14, pow at PLT
 ; SZ13-NEXT:    larl %r1, .LCPI34_2
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    ldr %f2, %f8
 ; SZ13-NEXT:    brasl %r14, pow at PLT
-; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v0, %v1
 ; SZ13-NEXT:    larl %r1, .LCPI34_3
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    ldr %f2, %f8
 ; SZ13-NEXT:    brasl %r14, pow at PLT
 ; SZ13-NEXT:    larl %r1, .LCPI34_4
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    ldr %f2, %f8
 ; SZ13-NEXT:    brasl %r14, pow at PLT
-; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT:    vl %v24, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v24, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 192(%r15) # 8-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v26, %v0, %v1
@@ -1912,26 +1912,26 @@ define <4 x double> @constrained_vector_powi_v4f64() #0 {
 ; SZ13-NEXT:    brasl %r14, __powidf2 at PLT
 ; SZ13-NEXT:    larl %r1, .LCPI39_1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    lghi %r2, 3
 ; SZ13-NEXT:    brasl %r14, __powidf2 at PLT
-; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v0, %v1
 ; SZ13-NEXT:    larl %r1, .LCPI39_2
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    lghi %r2, 3
 ; SZ13-NEXT:    brasl %r14, __powidf2 at PLT
 ; SZ13-NEXT:    larl %r1, .LCPI39_3
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    lghi %r2, 3
 ; SZ13-NEXT:    brasl %r14, __powidf2 at PLT
-; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT:    vl %v24, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v24, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v26, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 304(%r15)
@@ -2149,23 +2149,23 @@ define void @constrained_vector_sin_v3f64(<3 x double>* %a) #0 {
 ; SZ13-NEXT:    vl %v0, 0(%r2), 4
 ; SZ13-NEXT:    ld %f8, 16(%r2)
 ; SZ13-NEXT:    lgr %r13, %r2
-; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, sin at PLT
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
-; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    vrepg %v0, %v0, 1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, sin at PLT
-; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v1, %v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldr %f0, %f8
 ; SZ13-NEXT:    brasl %r14, sin at PLT
 ; SZ13-NEXT:    std %f0, 16(%r13)
-; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 192(%r15) # 8-byte Folded Reload
 ; SZ13-NEXT:    vst %v0, 0(%r13), 4
 ; SZ13-NEXT:    lmg %r13, %r15, 304(%r15)
@@ -2233,23 +2233,23 @@ define <4 x double> @constrained_vector_sin_v4f64() #0 {
 ; SZ13-NEXT:    brasl %r14, sin at PLT
 ; SZ13-NEXT:    larl %r1, .LCPI44_1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, sin at PLT
-; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v0, %v1
 ; SZ13-NEXT:    larl %r1, .LCPI44_2
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, sin at PLT
 ; SZ13-NEXT:    larl %r1, .LCPI44_3
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, sin at PLT
-; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT:    vl %v24, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v24, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v26, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 304(%r15)
@@ -2466,23 +2466,23 @@ define void @constrained_vector_cos_v3f64(<3 x double>* %a) #0 {
 ; SZ13-NEXT:    vl %v0, 0(%r2), 4
 ; SZ13-NEXT:    ld %f8, 16(%r2)
 ; SZ13-NEXT:    lgr %r13, %r2
-; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, cos at PLT
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
-; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    vrepg %v0, %v0, 1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, cos at PLT
-; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v1, %v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldr %f0, %f8
 ; SZ13-NEXT:    brasl %r14, cos at PLT
 ; SZ13-NEXT:    std %f0, 16(%r13)
-; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 192(%r15) # 8-byte Folded Reload
 ; SZ13-NEXT:    vst %v0, 0(%r13), 4
 ; SZ13-NEXT:    lmg %r13, %r15, 304(%r15)
@@ -2550,23 +2550,23 @@ define <4 x double> @constrained_vector_cos_v4f64() #0 {
 ; SZ13-NEXT:    brasl %r14, cos at PLT
 ; SZ13-NEXT:    larl %r1, .LCPI49_1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, cos at PLT
-; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v0, %v1
 ; SZ13-NEXT:    larl %r1, .LCPI49_2
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, cos at PLT
 ; SZ13-NEXT:    larl %r1, .LCPI49_3
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, cos at PLT
-; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT:    vl %v24, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v24, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v26, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 304(%r15)
@@ -2783,23 +2783,23 @@ define void @constrained_vector_exp_v3f64(<3 x double>* %a) #0 {
 ; SZ13-NEXT:    vl %v0, 0(%r2), 4
 ; SZ13-NEXT:    ld %f8, 16(%r2)
 ; SZ13-NEXT:    lgr %r13, %r2
-; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, exp at PLT
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
-; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    vrepg %v0, %v0, 1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, exp at PLT
-; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v1, %v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldr %f0, %f8
 ; SZ13-NEXT:    brasl %r14, exp at PLT
 ; SZ13-NEXT:    std %f0, 16(%r13)
-; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 192(%r15) # 8-byte Folded Reload
 ; SZ13-NEXT:    vst %v0, 0(%r13), 4
 ; SZ13-NEXT:    lmg %r13, %r15, 304(%r15)
@@ -2867,23 +2867,23 @@ define <4 x double> @constrained_vector_exp_v4f64() #0 {
 ; SZ13-NEXT:    brasl %r14, exp at PLT
 ; SZ13-NEXT:    larl %r1, .LCPI54_1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, exp at PLT
-; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v0, %v1
 ; SZ13-NEXT:    larl %r1, .LCPI54_2
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, exp at PLT
 ; SZ13-NEXT:    larl %r1, .LCPI54_3
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, exp at PLT
-; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT:    vl %v24, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v24, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v26, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 304(%r15)
@@ -3100,23 +3100,23 @@ define void @constrained_vector_exp2_v3f64(<3 x double>* %a) #0 {
 ; SZ13-NEXT:    vl %v0, 0(%r2), 4
 ; SZ13-NEXT:    ld %f8, 16(%r2)
 ; SZ13-NEXT:    lgr %r13, %r2
-; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, exp2 at PLT
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
-; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    vrepg %v0, %v0, 1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, exp2 at PLT
-; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v1, %v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldr %f0, %f8
 ; SZ13-NEXT:    brasl %r14, exp2 at PLT
 ; SZ13-NEXT:    std %f0, 16(%r13)
-; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 192(%r15) # 8-byte Folded Reload
 ; SZ13-NEXT:    vst %v0, 0(%r13), 4
 ; SZ13-NEXT:    lmg %r13, %r15, 304(%r15)
@@ -3184,23 +3184,23 @@ define <4 x double> @constrained_vector_exp2_v4f64() #0 {
 ; SZ13-NEXT:    brasl %r14, exp2 at PLT
 ; SZ13-NEXT:    larl %r1, .LCPI59_1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, exp2 at PLT
-; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v0, %v1
 ; SZ13-NEXT:    larl %r1, .LCPI59_2
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, exp2 at PLT
 ; SZ13-NEXT:    larl %r1, .LCPI59_3
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, exp2 at PLT
-; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT:    vl %v24, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v24, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v26, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 304(%r15)
@@ -3417,23 +3417,23 @@ define void @constrained_vector_log_v3f64(<3 x double>* %a) #0 {
 ; SZ13-NEXT:    vl %v0, 0(%r2), 4
 ; SZ13-NEXT:    ld %f8, 16(%r2)
 ; SZ13-NEXT:    lgr %r13, %r2
-; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, log at PLT
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
-; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    vrepg %v0, %v0, 1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, log at PLT
-; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v1, %v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldr %f0, %f8
 ; SZ13-NEXT:    brasl %r14, log at PLT
 ; SZ13-NEXT:    std %f0, 16(%r13)
-; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 192(%r15) # 8-byte Folded Reload
 ; SZ13-NEXT:    vst %v0, 0(%r13), 4
 ; SZ13-NEXT:    lmg %r13, %r15, 304(%r15)
@@ -3501,23 +3501,23 @@ define <4 x double> @constrained_vector_log_v4f64() #0 {
 ; SZ13-NEXT:    brasl %r14, log at PLT
 ; SZ13-NEXT:    larl %r1, .LCPI64_1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, log at PLT
-; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v0, %v1
 ; SZ13-NEXT:    larl %r1, .LCPI64_2
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, log at PLT
 ; SZ13-NEXT:    larl %r1, .LCPI64_3
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, log at PLT
-; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT:    vl %v24, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v24, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v26, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 304(%r15)
@@ -3734,23 +3734,23 @@ define void @constrained_vector_log10_v3f64(<3 x double>* %a) #0 {
 ; SZ13-NEXT:    vl %v0, 0(%r2), 4
 ; SZ13-NEXT:    ld %f8, 16(%r2)
 ; SZ13-NEXT:    lgr %r13, %r2
-; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, log10 at PLT
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
-; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    vrepg %v0, %v0, 1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, log10 at PLT
-; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v1, %v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldr %f0, %f8
 ; SZ13-NEXT:    brasl %r14, log10 at PLT
 ; SZ13-NEXT:    std %f0, 16(%r13)
-; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 192(%r15) # 8-byte Folded Reload
 ; SZ13-NEXT:    vst %v0, 0(%r13), 4
 ; SZ13-NEXT:    lmg %r13, %r15, 304(%r15)
@@ -3818,23 +3818,23 @@ define <4 x double> @constrained_vector_log10_v4f64() #0 {
 ; SZ13-NEXT:    brasl %r14, log10 at PLT
 ; SZ13-NEXT:    larl %r1, .LCPI69_1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, log10 at PLT
-; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v0, %v1
 ; SZ13-NEXT:    larl %r1, .LCPI69_2
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, log10 at PLT
 ; SZ13-NEXT:    larl %r1, .LCPI69_3
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, log10 at PLT
-; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT:    vl %v24, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v24, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v26, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 304(%r15)
@@ -4051,23 +4051,23 @@ define void @constrained_vector_log2_v3f64(<3 x double>* %a) #0 {
 ; SZ13-NEXT:    vl %v0, 0(%r2), 4
 ; SZ13-NEXT:    ld %f8, 16(%r2)
 ; SZ13-NEXT:    lgr %r13, %r2
-; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, log2 at PLT
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
-; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    vrepg %v0, %v0, 1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, log2 at PLT
-; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v1, %v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldr %f0, %f8
 ; SZ13-NEXT:    brasl %r14, log2 at PLT
 ; SZ13-NEXT:    std %f0, 16(%r13)
-; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 192(%r15) # 8-byte Folded Reload
 ; SZ13-NEXT:    vst %v0, 0(%r13), 4
 ; SZ13-NEXT:    lmg %r13, %r15, 304(%r15)
@@ -4135,23 +4135,23 @@ define <4 x double> @constrained_vector_log2_v4f64() #0 {
 ; SZ13-NEXT:    brasl %r14, log2 at PLT
 ; SZ13-NEXT:    larl %r1, .LCPI74_1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, log2 at PLT
-; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v0, %v1
 ; SZ13-NEXT:    larl %r1, .LCPI74_2
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, log2 at PLT
 ; SZ13-NEXT:    larl %r1, .LCPI74_3
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, log2 at PLT
-; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT:    vl %v24, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v24, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v26, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 304(%r15)
@@ -4791,27 +4791,27 @@ define void @constrained_vector_log10_maxnum_v3f64(<3 x double>* %a) #0 {
 ; SZ13-NEXT:    vl %v0, 0(%r2), 4
 ; SZ13-NEXT:    ld %f8, 16(%r2)
 ; SZ13-NEXT:    lgr %r13, %r2
-; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, fmax at PLT
 ; SZ13-NEXT:    larl %r1, .LCPI88_1
 ; SZ13-NEXT:    ldeb %f2, 0(%r1)
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
-; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    vrepg %v0, %v0, 1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, fmax at PLT
 ; SZ13-NEXT:    larl %r1, .LCPI88_2
 ; SZ13-NEXT:    ldeb %f2, 0(%r1)
-; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v1, %v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldr %f0, %f8
 ; SZ13-NEXT:    brasl %r14, fmax at PLT
 ; SZ13-NEXT:    std %f0, 16(%r13)
-; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 192(%r15) # 8-byte Folded Reload
 ; SZ13-NEXT:    vst %v0, 0(%r13), 4
 ; SZ13-NEXT:    lmg %r13, %r15, 304(%r15)
@@ -4889,29 +4889,29 @@ define <4 x double> @constrained_vector_maxnum_v4f64() #0 {
 ; SZ13-NEXT:    brasl %r14, fmax at PLT
 ; SZ13-NEXT:    larl %r1, .LCPI89_2
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    larl %r1, .LCPI89_3
 ; SZ13-NEXT:    ldeb %f2, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, fmax at PLT
-; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v0, %v1
 ; SZ13-NEXT:    larl %r1, .LCPI89_4
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    larl %r1, .LCPI89_5
 ; SZ13-NEXT:    ldeb %f2, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, fmax at PLT
 ; SZ13-NEXT:    larl %r1, .LCPI89_6
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    larl %r1, .LCPI89_7
 ; SZ13-NEXT:    ldeb %f2, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, fmax at PLT
-; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT:    vl %v24, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v24, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v26, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 304(%r15)
@@ -5170,25 +5170,25 @@ define void @constrained_vector_minnum_v3f64(<3 x double>* %a) #0 {
 ; SZ13-NEXT:    ld %f8, 16(%r2)
 ; SZ13-NEXT:    ldr %f2, %f9
 ; SZ13-NEXT:    lgr %r13, %r2
-; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, fmin@PLT
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
-; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ldr %f2, %f9
 ; SZ13-NEXT:    vrepg %v0, %v0, 1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, fmin@PLT
-; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v1, %v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldr %f0, %f8
 ; SZ13-NEXT:    ldr %f2, %f9
 ; SZ13-NEXT:    brasl %r14, fmin@PLT
 ; SZ13-NEXT:    std %f0, 16(%r13)
-; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 200(%r15) # 8-byte Folded Reload
 ; SZ13-NEXT:    ld %f9, 192(%r15) # 8-byte Folded Reload
 ; SZ13-NEXT:    vst %v0, 0(%r13), 4
@@ -5267,29 +5267,29 @@ define <4 x double> @constrained_vector_minnum_v4f64() #0 {
 ; SZ13-NEXT:    brasl %r14, fmin@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI94_2
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    larl %r1, .LCPI94_3
 ; SZ13-NEXT:    ldeb %f2, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, fmin@PLT
-; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v0, %v1
 ; SZ13-NEXT:    larl %r1, .LCPI94_4
-; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    larl %r1, .LCPI94_5
 ; SZ13-NEXT:    ldeb %f2, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, fmin@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI94_6
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    larl %r1, .LCPI94_7
 ; SZ13-NEXT:    ldeb %f2, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, fmin@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
-; SZ13-NEXT:    vl %v24, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v24, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v26, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 304(%r15)

diff  --git a/llvm/test/DebugInfo/SystemZ/variable-loc.ll b/llvm/test/DebugInfo/SystemZ/variable-loc.ll
index 6112a81643784..a538be0a9be68 100644
--- a/llvm/test/DebugInfo/SystemZ/variable-loc.ll
+++ b/llvm/test/DebugInfo/SystemZ/variable-loc.ll
@@ -10,12 +10,12 @@
 ;
 ; CHECK: main:
 ; CHECK: aghi    %r15, -568
-; CHECK: la      %r2, 164(%r11)
+; CHECK: la      %r2, 168(%r11)
 ; CHECK: brasl   %r14, populate_array@PLT
 
 ; DEBUG: DW_TAG_variable
 ; DEBUG-NOT: DW_TAG
-; DEBUG: DW_AT_location {{.*}}(DW_OP_fbreg +164)
+; DEBUG: DW_AT_location {{.*}}(DW_OP_fbreg +168)
 ; DEBUG-NOT: DW_TAG
 ; DEBUG: DW_AT_name {{.*}} "main_arr"
 

diff  --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/systemz_generated_funcs.ll.generated.expected b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/systemz_generated_funcs.ll.generated.expected
index c22995e46cbd2..86a3b5e1fb3f5 100644
--- a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/systemz_generated_funcs.ll.generated.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/systemz_generated_funcs.ll.generated.expected
@@ -74,27 +74,27 @@ attributes #0 = { noredzone nounwind ssp uwtable "frame-pointer"="all" }
 ; CHECK-NEXT:    .cfi_def_cfa_register %r11
 ; CHECK-NEXT:    mvhi 180(%r11), 0
 ; CHECK-NEXT:    lhi %r0, 0
-; CHECK-NEXT:    mvhi 176(%r11), 0
+; CHECK-NEXT:    mvhi 168(%r11), 0
 ; CHECK-NEXT:    cije %r0, 0, .LBB0_3
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mvhi 168(%r11), 1
-; CHECK-NEXT:    chsi 176(%r11), 0
+; CHECK-NEXT:    mvhi 164(%r11), 1
+; CHECK-NEXT:    chsi 168(%r11), 0
 ; CHECK-NEXT:    je .LBB0_4
 ; CHECK-NEXT:  .LBB0_2:
-; CHECK-NEXT:    mvhi 168(%r11), 1
+; CHECK-NEXT:    mvhi 164(%r11), 1
 ; CHECK-NEXT:    j .LBB0_5
 ; CHECK-NEXT:  .LBB0_3:
-; CHECK-NEXT:    mvhi 176(%r11), 1
-; CHECK-NEXT:    mvhi 172(%r11), 2
-; CHECK-NEXT:    mvhi 168(%r11), 3
-; CHECK-NEXT:    mvhi 164(%r11), 4
-; CHECK-NEXT:    chsi 176(%r11), 0
+; CHECK-NEXT:    mvhi 168(%r11), 1
+; CHECK-NEXT:    mvhi 176(%r11), 2
+; CHECK-NEXT:    mvhi 164(%r11), 3
+; CHECK-NEXT:    mvhi 172(%r11), 4
+; CHECK-NEXT:    chsi 168(%r11), 0
 ; CHECK-NEXT:    jlh .LBB0_2
 ; CHECK-NEXT:  .LBB0_4:
-; CHECK-NEXT:    mvhi 176(%r11), 1
-; CHECK-NEXT:    mvhi 172(%r11), 2
-; CHECK-NEXT:    mvhi 168(%r11), 3
-; CHECK-NEXT:    mvhi 164(%r11), 4
+; CHECK-NEXT:    mvhi 168(%r11), 1
+; CHECK-NEXT:    mvhi 176(%r11), 2
+; CHECK-NEXT:    mvhi 164(%r11), 3
+; CHECK-NEXT:    mvhi 172(%r11), 4
 ; CHECK-NEXT:  .LBB0_5:
 ; CHECK-NEXT:    lhi %r2, 0
 ; CHECK-NEXT:    lmg %r11, %r15, 272(%r11)

diff  --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/systemz_generated_funcs.ll.nogenerated.expected b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/systemz_generated_funcs.ll.nogenerated.expected
index 292d558f2b9fb..8a3b47282a610 100644
--- a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/systemz_generated_funcs.ll.nogenerated.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/systemz_generated_funcs.ll.nogenerated.expected
@@ -16,27 +16,27 @@ define dso_local i32 @check_boundaries() #0 {
 ; CHECK-NEXT:    .cfi_def_cfa_register %r11
 ; CHECK-NEXT:    mvhi 180(%r11), 0
 ; CHECK-NEXT:    lhi %r0, 0
-; CHECK-NEXT:    mvhi 176(%r11), 0
+; CHECK-NEXT:    mvhi 168(%r11), 0
 ; CHECK-NEXT:    cije %r0, 0, .LBB0_3
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mvhi 168(%r11), 1
-; CHECK-NEXT:    chsi 176(%r11), 0
+; CHECK-NEXT:    mvhi 164(%r11), 1
+; CHECK-NEXT:    chsi 168(%r11), 0
 ; CHECK-NEXT:    je .LBB0_4
 ; CHECK-NEXT:  .LBB0_2:
-; CHECK-NEXT:    mvhi 168(%r11), 1
+; CHECK-NEXT:    mvhi 164(%r11), 1
 ; CHECK-NEXT:    j .LBB0_5
 ; CHECK-NEXT:  .LBB0_3:
-; CHECK-NEXT:    mvhi 176(%r11), 1
-; CHECK-NEXT:    mvhi 172(%r11), 2
-; CHECK-NEXT:    mvhi 168(%r11), 3
-; CHECK-NEXT:    mvhi 164(%r11), 4
-; CHECK-NEXT:    chsi 176(%r11), 0
+; CHECK-NEXT:    mvhi 168(%r11), 1
+; CHECK-NEXT:    mvhi 176(%r11), 2
+; CHECK-NEXT:    mvhi 164(%r11), 3
+; CHECK-NEXT:    mvhi 172(%r11), 4
+; CHECK-NEXT:    chsi 168(%r11), 0
 ; CHECK-NEXT:    jlh .LBB0_2
 ; CHECK-NEXT:  .LBB0_4:
-; CHECK-NEXT:    mvhi 176(%r11), 1
-; CHECK-NEXT:    mvhi 172(%r11), 2
-; CHECK-NEXT:    mvhi 168(%r11), 3
-; CHECK-NEXT:    mvhi 164(%r11), 4
+; CHECK-NEXT:    mvhi 168(%r11), 1
+; CHECK-NEXT:    mvhi 176(%r11), 2
+; CHECK-NEXT:    mvhi 164(%r11), 3
+; CHECK-NEXT:    mvhi 172(%r11), 4
 ; CHECK-NEXT:  .LBB0_5:
 ; CHECK-NEXT:    lhi %r2, 0
 ; CHECK-NEXT:    lmg %r11, %r15, 272(%r11)
