[llvm] 2b6c234 - [LoongArch] Set isAsCheapAsAMove on ADDI.D and {X}ORI instructions (#94733)

via llvm-commits llvm-commits at lists.llvm.org
Wed Jun 12 07:02:51 PDT 2024


Author: hev
Date: 2024-06-12T22:02:47+08:00
New Revision: 2b6c23461d85b59b4abb8d738b1d4ab574bbe437

URL: https://github.com/llvm/llvm-project/commit/2b6c23461d85b59b4abb8d738b1d4ab574bbe437
DIFF: https://github.com/llvm/llvm-project/commit/2b6c23461d85b59b4abb8d738b1d4ab574bbe437.diff

LOG: [LoongArch] Set isAsCheapAsAMove on ADDI.D and {X}ORI instructions (#94733)
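
This marks ADDI.D, ORI and XORI as "as cheap as a move" when they either copy a register (immediate of 0) or materialize a constant from $zero (R0). Such instructions can then be rematerialized next to their uses instead of being kept live in callee-saved registers across calls, which is what the smaller stack frames and removed $sN spills in the updated tests below reflect. The self-contained C++ sketch that follows mirrors the predicate added in LoongArchInstrInfo.cpp; the SimpleInst struct and the string opcodes are illustrative stand-ins, not LLVM API:

#include <cassert>
#include <cstdint>
#include <string>

// Illustrative stand-in for the relevant MachineInstr operands; not an LLVM type.
struct SimpleInst {
  std::string Opcode; // e.g. "ADDI_D", "ORI", "XORI"
  bool SrcIsR0;       // true when operand 1 is the zero register $zero (R0)
  int64_t Imm;        // operand 2 immediate
};

// Mirrors LoongArchInstrInfo::isAsCheapAsAMove from this commit: ADDI.D, ORI
// and XORI count as cheap as a move when they copy a register (Imm == 0) or
// materialize a constant from $zero.
static bool isAsCheapAsAMove(const SimpleInst &MI) {
  if (MI.Opcode == "ADDI_D" || MI.Opcode == "ORI" || MI.Opcode == "XORI")
    return MI.SrcIsR0 || MI.Imm == 0;
  return false; // the real hook falls back to MI.isAsCheapAsAMove() here
}

int main() {
  assert(isAsCheapAsAMove({"ORI", true, 1}));     // ori    $a0, $zero, 1
  assert(isAsCheapAsAMove({"ADDI_D", false, 0})); // addi.d $a0, $a1, 0
  assert(!isAsCheapAsAMove({"XORI", false, 5}));  // xori   $a0, $a1, 5
  return 0;
}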

Added: 
    

Modified: 
    llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp
    llvm/lib/Target/LoongArch/LoongArchInstrInfo.h
    llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
    llvm/test/CodeGen/LoongArch/calling-conv-lp64d.ll
    llvm/test/CodeGen/LoongArch/calling-conv-lp64s.ll
    llvm/test/CodeGen/LoongArch/can-not-realign-stack.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll
    llvm/test/CodeGen/LoongArch/jump-table.ll
    llvm/test/CodeGen/LoongArch/register-coalescer-crash-pr79718.mir
    llvm/test/CodeGen/LoongArch/sextw-removal.ll
    llvm/test/CodeGen/LoongArch/vararg.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp
index 6b75634f5b2ef..a85b054a85d72 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp
@@ -229,6 +229,21 @@ unsigned LoongArchInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
   return MI.getDesc().getSize();
 }
 
+bool LoongArchInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
+  const unsigned Opcode = MI.getOpcode();
+  switch (Opcode) {
+  default:
+    break;
+  case LoongArch::ADDI_D:
+  case LoongArch::ORI:
+  case LoongArch::XORI:
+    return (MI.getOperand(1).isReg() &&
+            MI.getOperand(1).getReg() == LoongArch::R0) ||
+           (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
+  }
+  return MI.isAsCheapAsAMove();
+}
+
 MachineBasicBlock *
 LoongArchInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
   assert(MI.getDesc().isBranch() && "Unexpected opcode!");

diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.h b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.h
index 3b80f55bc84fc..eb19051e380ca 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.h
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.h
@@ -52,6 +52,8 @@ class LoongArchInstrInfo : public LoongArchGenInstrInfo {
 
   unsigned getInstSizeInBytes(const MachineInstr &MI) const override;
 
+  bool isAsCheapAsAMove(const MachineInstr &MI) const override;
+
   MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const override;
 
   bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,

diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
index f72f46e39e2a4..af61349db2188 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -751,7 +751,8 @@ def XOR  : ALU_3R<0x00158000>;
 def ANDN : ALU_3R<0x00168000>;
 def ORN  : ALU_3R<0x00160000>;
 def ANDI : ALU_2RI12<0x03400000, uimm12>;
-let isReMaterializable = 1 in {
+// See LoongArchInstrInfo::isAsCheapAsAMove for more details.
+let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
 def ORI  : ALU_2RI12<0x03800000, uimm12_ori>;
 def XORI : ALU_2RI12<0x03c00000, uimm12>;
 }
@@ -858,7 +859,8 @@ def ADD_D : ALU_3R<0x00108000>;
 def SUB_D : ALU_3R<0x00118000>;
 // ADDI_D isn't always rematerializable, but isReMaterializable will be used as
 // a hint which is verified in isReallyTriviallyReMaterializable.
-let isReMaterializable = 1 in {
+// See LoongArchInstrInfo::isAsCheapAsAMove for more details.
+let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
 def ADDI_D : ALU_2RI12<0x02c00000, simm12_addlike>;
 }
 def ADDU16I_D : ALU_2RI16<0x10000000, simm16>;

diff --git a/llvm/test/CodeGen/LoongArch/calling-conv-lp64d.ll b/llvm/test/CodeGen/LoongArch/calling-conv-lp64d.ll
index 5e89fd41d6a7c..cc6ba057019c6 100644
--- a/llvm/test/CodeGen/LoongArch/calling-conv-lp64d.ll
+++ b/llvm/test/CodeGen/LoongArch/calling-conv-lp64d.ll
@@ -28,9 +28,9 @@ define i64 @caller_float_in_fpr() nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi.d $sp, $sp, -16
 ; CHECK-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; CHECK-NEXT:    ori $a0, $zero, 1
 ; CHECK-NEXT:    movgr2fr.w $fa0, $zero
 ; CHECK-NEXT:    movgr2fr.d $fa1, $zero
+; CHECK-NEXT:    ori $a0, $zero, 1
 ; CHECK-NEXT:    bl %plt(callee_float_in_fpr)
 ; CHECK-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 16

diff --git a/llvm/test/CodeGen/LoongArch/calling-conv-lp64s.ll b/llvm/test/CodeGen/LoongArch/calling-conv-lp64s.ll
index 8158c387f7b6b..558b9457239c1 100644
--- a/llvm/test/CodeGen/LoongArch/calling-conv-lp64s.ll
+++ b/llvm/test/CodeGen/LoongArch/calling-conv-lp64s.ll
@@ -27,8 +27,8 @@ define i64 @caller_float_in_regs() nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi.d $sp, $sp, -16
 ; CHECK-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; CHECK-NEXT:    ori $a0, $zero, 1
 ; CHECK-NEXT:    lu12i.w $a1, 262144
+; CHECK-NEXT:    ori $a0, $zero, 1
 ; CHECK-NEXT:    bl %plt(callee_float_in_regs)
 ; CHECK-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 16

diff --git a/llvm/test/CodeGen/LoongArch/can-not-realign-stack.ll b/llvm/test/CodeGen/LoongArch/can-not-realign-stack.ll
index a3d85f6bd06d8..49155a3966a84 100644
--- a/llvm/test/CodeGen/LoongArch/can-not-realign-stack.ll
+++ b/llvm/test/CodeGen/LoongArch/can-not-realign-stack.ll
@@ -16,7 +16,6 @@ define dso_local noundef signext i32 @main() nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addi.d $sp, $sp, -272
 ; CHECK-NEXT:    st.d $ra, $sp, 264 # 8-byte Folded Spill
-; CHECK-NEXT:    st.d $fp, $sp, 256 # 8-byte Folded Spill
 ; CHECK-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI0_0)
 ; CHECK-NEXT:    addi.d $a0, $a0, %pc_lo12(.LCPI0_0)
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
@@ -33,25 +32,23 @@ define dso_local noundef signext i32 @main() nounwind {
 ; CHECK-NEXT:    addi.d $a0, $a0, %pc_lo12(.LCPI0_3)
 ; CHECK-NEXT:    xvld $xr3, $a0, 0
 ; CHECK-NEXT:    xvst $xr3, $sp, 0 # 32-byte Folded Spill
-; CHECK-NEXT:    xvst $xr0, $sp, 128
-; CHECK-NEXT:    xvst $xr1, $sp, 160
-; CHECK-NEXT:    xvst $xr2, $sp, 192
-; CHECK-NEXT:    xvst $xr3, $sp, 224
-; CHECK-NEXT:    addi.d $fp, $sp, 128
-; CHECK-NEXT:    move $a0, $fp
+; CHECK-NEXT:    xvst $xr0, $sp, 136
+; CHECK-NEXT:    xvst $xr1, $sp, 168
+; CHECK-NEXT:    xvst $xr2, $sp, 200
+; CHECK-NEXT:    xvst $xr3, $sp, 232
+; CHECK-NEXT:    addi.d $a0, $sp, 136
 ; CHECK-NEXT:    bl %plt(foo)
 ; CHECK-NEXT:    xvld $xr0, $sp, 96 # 32-byte Folded Reload
-; CHECK-NEXT:    xvst $xr0, $sp, 128
+; CHECK-NEXT:    xvst $xr0, $sp, 136
 ; CHECK-NEXT:    xvld $xr0, $sp, 64 # 32-byte Folded Reload
-; CHECK-NEXT:    xvst $xr0, $sp, 160
+; CHECK-NEXT:    xvst $xr0, $sp, 168
 ; CHECK-NEXT:    xvld $xr0, $sp, 32 # 32-byte Folded Reload
-; CHECK-NEXT:    xvst $xr0, $sp, 192
+; CHECK-NEXT:    xvst $xr0, $sp, 200
 ; CHECK-NEXT:    xvld $xr0, $sp, 0 # 32-byte Folded Reload
-; CHECK-NEXT:    xvst $xr0, $sp, 224
-; CHECK-NEXT:    move $a0, $fp
+; CHECK-NEXT:    xvst $xr0, $sp, 232
+; CHECK-NEXT:    addi.d $a0, $sp, 136
 ; CHECK-NEXT:    bl %plt(bar)
 ; CHECK-NEXT:    move $a0, $zero
-; CHECK-NEXT:    ld.d $fp, $sp, 256 # 8-byte Folded Reload
 ; CHECK-NEXT:    ld.d $ra, $sp, 264 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 272
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll
index 81cc29419a0e0..193fa6c08600a 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll
@@ -297,95 +297,71 @@ define float @float_fmax_acquire(ptr %p) nounwind {
 define double @double_fadd_acquire(ptr %p) nounwind {
 ; LA64F-LABEL: double_fadd_acquire:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 16 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s5, $a0, 0
+; LA64F-NEXT:    ld.d $s1, $a0, 0
 ; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 8
-; LA64F-NEXT:    addi.d $s3, $sp, 0
-; LA64F-NEXT:    ori $s4, $zero, 2
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB4_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s5
+; LA64F-NEXT:    move $a0, $s1
 ; LA64F-NEXT:    move $a1, $s0
 ; LA64F-NEXT:    bl %plt(__adddf3)
-; LA64F-NEXT:    st.d $s5, $sp, 8
+; LA64F-NEXT:    st.d $s1, $sp, 8
 ; LA64F-NEXT:    st.d $a0, $sp, 0
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 8
+; LA64F-NEXT:    addi.d $a3, $sp, 0
+; LA64F-NEXT:    ori $a4, $zero, 2
+; LA64F-NEXT:    ori $a5, $zero, 2
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
-; LA64F-NEXT:    move $a5, $s4
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s5, $sp, 8
+; LA64F-NEXT:    ld.d $s1, $sp, 8
 ; LA64F-NEXT:    beqz $a0, .LBB4_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    ld.d $s5, $sp, 16 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ld.d $s1, $sp, 16 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fadd_acquire:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
 ; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
 ; LA64D-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64D-NEXT:    ffint.d.l $fs0, $fa1
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 16
-; LA64D-NEXT:    addi.d $s2, $sp, 8
-; LA64D-NEXT:    ori $s3, $zero, 2
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB4_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fadd.d $fa1, $fa0, $fs0
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
 ; LA64D-NEXT:    fst.d $fa1, $sp, 8
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 2
+; LA64D-NEXT:    ori $a5, $zero, 2
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
-; LA64D-NEXT:    move $a5, $s3
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
 ; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB4_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fadd ptr %p, double 1.0 acquire, align 4
   ret double %v
@@ -394,95 +370,71 @@ define double @double_fadd_acquire(ptr %p) nounwind {
 define double @double_fsub_acquire(ptr %p) nounwind {
 ; LA64F-LABEL: double_fsub_acquire:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 16 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s5, $a0, 0
+; LA64F-NEXT:    ld.d $s1, $a0, 0
 ; LA64F-NEXT:    lu52i.d $s0, $zero, -1025
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 8
-; LA64F-NEXT:    addi.d $s3, $sp, 0
-; LA64F-NEXT:    ori $s4, $zero, 2
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB5_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s5
+; LA64F-NEXT:    move $a0, $s1
 ; LA64F-NEXT:    move $a1, $s0
 ; LA64F-NEXT:    bl %plt(__adddf3)
-; LA64F-NEXT:    st.d $s5, $sp, 8
+; LA64F-NEXT:    st.d $s1, $sp, 8
 ; LA64F-NEXT:    st.d $a0, $sp, 0
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 8
+; LA64F-NEXT:    addi.d $a3, $sp, 0
+; LA64F-NEXT:    ori $a4, $zero, 2
+; LA64F-NEXT:    ori $a5, $zero, 2
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
-; LA64F-NEXT:    move $a5, $s4
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s5, $sp, 8
+; LA64F-NEXT:    ld.d $s1, $sp, 8
 ; LA64F-NEXT:    beqz $a0, .LBB5_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    ld.d $s5, $sp, 16 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ld.d $s1, $sp, 16 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fsub_acquire:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
 ; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_0)
 ; LA64D-NEXT:    addi.d $a0, $a0, %pc_lo12(.LCPI5_0)
 ; LA64D-NEXT:    fld.d $fs0, $a0, 0
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 16
-; LA64D-NEXT:    addi.d $s2, $sp, 8
-; LA64D-NEXT:    ori $s3, $zero, 2
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB5_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fadd.d $fa1, $fa0, $fs0
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
 ; LA64D-NEXT:    fst.d $fa1, $sp, 8
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 2
+; LA64D-NEXT:    ori $a5, $zero, 2
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
-; LA64D-NEXT:    move $a5, $s3
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
 ; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB5_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fsub ptr %p, double 1.0 acquire, align 4
   ret double %v
@@ -491,71 +443,51 @@ define double @double_fsub_acquire(ptr %p) nounwind {
 define double @double_fmin_acquire(ptr %p) nounwind {
 ; LA64F-LABEL: double_fmin_acquire:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 16 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s5, $a0, 0
+; LA64F-NEXT:    ld.d $s1, $a0, 0
 ; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 8
-; LA64F-NEXT:    addi.d $s3, $sp, 0
-; LA64F-NEXT:    ori $s4, $zero, 2
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB6_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s5
+; LA64F-NEXT:    move $a0, $s1
 ; LA64F-NEXT:    move $a1, $s0
 ; LA64F-NEXT:    bl %plt(fmin)
-; LA64F-NEXT:    st.d $s5, $sp, 8
+; LA64F-NEXT:    st.d $s1, $sp, 8
 ; LA64F-NEXT:    st.d $a0, $sp, 0
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 8
+; LA64F-NEXT:    addi.d $a3, $sp, 0
+; LA64F-NEXT:    ori $a4, $zero, 2
+; LA64F-NEXT:    ori $a5, $zero, 2
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
-; LA64F-NEXT:    move $a5, $s4
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s5, $sp, 8
+; LA64F-NEXT:    ld.d $s1, $sp, 8
 ; LA64F-NEXT:    beqz $a0, .LBB6_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    ld.d $s5, $sp, 16 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ld.d $s1, $sp, 16 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fmin_acquire:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
 ; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
 ; LA64D-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64D-NEXT:    ffint.d.l $fs0, $fa1
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 16
-; LA64D-NEXT:    addi.d $s2, $sp, 8
-; LA64D-NEXT:    ori $s3, $zero, 2
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB6_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
@@ -563,24 +495,20 @@ define double @double_fmin_acquire(ptr %p) nounwind {
 ; LA64D-NEXT:    fmin.d $fa1, $fa1, $fs0
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
 ; LA64D-NEXT:    fst.d $fa1, $sp, 8
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 2
+; LA64D-NEXT:    ori $a5, $zero, 2
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
-; LA64D-NEXT:    move $a5, $s3
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
 ; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB6_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fmin ptr %p, double 1.0 acquire, align 4
   ret double %v
@@ -589,71 +517,51 @@ define double @double_fmin_acquire(ptr %p) nounwind {
 define double @double_fmax_acquire(ptr %p) nounwind {
 ; LA64F-LABEL: double_fmax_acquire:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 16 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s5, $a0, 0
+; LA64F-NEXT:    ld.d $s1, $a0, 0
 ; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 8
-; LA64F-NEXT:    addi.d $s3, $sp, 0
-; LA64F-NEXT:    ori $s4, $zero, 2
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB7_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s5
+; LA64F-NEXT:    move $a0, $s1
 ; LA64F-NEXT:    move $a1, $s0
 ; LA64F-NEXT:    bl %plt(fmax)
-; LA64F-NEXT:    st.d $s5, $sp, 8
+; LA64F-NEXT:    st.d $s1, $sp, 8
 ; LA64F-NEXT:    st.d $a0, $sp, 0
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 8
+; LA64F-NEXT:    addi.d $a3, $sp, 0
+; LA64F-NEXT:    ori $a4, $zero, 2
+; LA64F-NEXT:    ori $a5, $zero, 2
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
-; LA64F-NEXT:    move $a5, $s4
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s5, $sp, 8
+; LA64F-NEXT:    ld.d $s1, $sp, 8
 ; LA64F-NEXT:    beqz $a0, .LBB7_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    ld.d $s5, $sp, 16 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ld.d $s1, $sp, 16 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fmax_acquire:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
 ; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
 ; LA64D-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64D-NEXT:    ffint.d.l $fs0, $fa1
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 16
-; LA64D-NEXT:    addi.d $s2, $sp, 8
-; LA64D-NEXT:    ori $s3, $zero, 2
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB7_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
@@ -661,24 +569,20 @@ define double @double_fmax_acquire(ptr %p) nounwind {
 ; LA64D-NEXT:    fmax.d $fa1, $fa1, $fs0
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
 ; LA64D-NEXT:    fst.d $fa1, $sp, 8
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 2
+; LA64D-NEXT:    ori $a5, $zero, 2
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
-; LA64D-NEXT:    move $a5, $s3
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
 ; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB7_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fmax ptr %p, double 1.0 acquire, align 4
   ret double %v
@@ -979,95 +883,71 @@ define float @float_fmax_release(ptr %p) nounwind {
 define double @double_fadd_release(ptr %p) nounwind {
 ; LA64F-LABEL: double_fadd_release:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 16 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s5, $a0, 0
+; LA64F-NEXT:    ld.d $s1, $a0, 0
 ; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 8
-; LA64F-NEXT:    addi.d $s3, $sp, 0
-; LA64F-NEXT:    ori $s4, $zero, 3
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB12_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s5
+; LA64F-NEXT:    move $a0, $s1
 ; LA64F-NEXT:    move $a1, $s0
 ; LA64F-NEXT:    bl %plt(__adddf3)
-; LA64F-NEXT:    st.d $s5, $sp, 8
+; LA64F-NEXT:    st.d $s1, $sp, 8
 ; LA64F-NEXT:    st.d $a0, $sp, 0
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 8
+; LA64F-NEXT:    addi.d $a3, $sp, 0
+; LA64F-NEXT:    ori $a4, $zero, 3
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
 ; LA64F-NEXT:    move $a5, $zero
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s5, $sp, 8
+; LA64F-NEXT:    ld.d $s1, $sp, 8
 ; LA64F-NEXT:    beqz $a0, .LBB12_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    ld.d $s5, $sp, 16 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ld.d $s1, $sp, 16 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fadd_release:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
 ; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
 ; LA64D-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64D-NEXT:    ffint.d.l $fs0, $fa1
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 16
-; LA64D-NEXT:    addi.d $s2, $sp, 8
-; LA64D-NEXT:    ori $s3, $zero, 3
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB12_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fadd.d $fa1, $fa0, $fs0
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
 ; LA64D-NEXT:    fst.d $fa1, $sp, 8
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 3
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
 ; LA64D-NEXT:    move $a5, $zero
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
 ; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB12_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fadd ptr %p, double 1.0 release, align 4
   ret double %v
@@ -1076,95 +956,71 @@ define double @double_fadd_release(ptr %p) nounwind {
 define double @double_fsub_release(ptr %p) nounwind {
 ; LA64F-LABEL: double_fsub_release:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 16 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s5, $a0, 0
+; LA64F-NEXT:    ld.d $s1, $a0, 0
 ; LA64F-NEXT:    lu52i.d $s0, $zero, -1025
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 8
-; LA64F-NEXT:    addi.d $s3, $sp, 0
-; LA64F-NEXT:    ori $s4, $zero, 3
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB13_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s5
+; LA64F-NEXT:    move $a0, $s1
 ; LA64F-NEXT:    move $a1, $s0
 ; LA64F-NEXT:    bl %plt(__adddf3)
-; LA64F-NEXT:    st.d $s5, $sp, 8
+; LA64F-NEXT:    st.d $s1, $sp, 8
 ; LA64F-NEXT:    st.d $a0, $sp, 0
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 8
+; LA64F-NEXT:    addi.d $a3, $sp, 0
+; LA64F-NEXT:    ori $a4, $zero, 3
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
 ; LA64F-NEXT:    move $a5, $zero
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s5, $sp, 8
+; LA64F-NEXT:    ld.d $s1, $sp, 8
 ; LA64F-NEXT:    beqz $a0, .LBB13_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    ld.d $s5, $sp, 16 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ld.d $s1, $sp, 16 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fsub_release:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
 ; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI13_0)
 ; LA64D-NEXT:    addi.d $a0, $a0, %pc_lo12(.LCPI13_0)
 ; LA64D-NEXT:    fld.d $fs0, $a0, 0
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 16
-; LA64D-NEXT:    addi.d $s2, $sp, 8
-; LA64D-NEXT:    ori $s3, $zero, 3
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB13_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fadd.d $fa1, $fa0, $fs0
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
 ; LA64D-NEXT:    fst.d $fa1, $sp, 8
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 3
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
 ; LA64D-NEXT:    move $a5, $zero
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
 ; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB13_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fsub ptr %p, double 1.0 release, align 4
   ret double %v
@@ -1173,71 +1029,51 @@ define double @double_fsub_release(ptr %p) nounwind {
 define double @double_fmin_release(ptr %p) nounwind {
 ; LA64F-LABEL: double_fmin_release:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 16 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s5, $a0, 0
+; LA64F-NEXT:    ld.d $s1, $a0, 0
 ; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 8
-; LA64F-NEXT:    addi.d $s3, $sp, 0
-; LA64F-NEXT:    ori $s4, $zero, 3
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB14_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s5
+; LA64F-NEXT:    move $a0, $s1
 ; LA64F-NEXT:    move $a1, $s0
 ; LA64F-NEXT:    bl %plt(fmin)
-; LA64F-NEXT:    st.d $s5, $sp, 8
+; LA64F-NEXT:    st.d $s1, $sp, 8
 ; LA64F-NEXT:    st.d $a0, $sp, 0
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 8
+; LA64F-NEXT:    addi.d $a3, $sp, 0
+; LA64F-NEXT:    ori $a4, $zero, 3
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
 ; LA64F-NEXT:    move $a5, $zero
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s5, $sp, 8
+; LA64F-NEXT:    ld.d $s1, $sp, 8
 ; LA64F-NEXT:    beqz $a0, .LBB14_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    ld.d $s5, $sp, 16 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ld.d $s1, $sp, 16 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fmin_release:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
 ; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
 ; LA64D-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64D-NEXT:    ffint.d.l $fs0, $fa1
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 16
-; LA64D-NEXT:    addi.d $s2, $sp, 8
-; LA64D-NEXT:    ori $s3, $zero, 3
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB14_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
@@ -1245,24 +1081,20 @@ define double @double_fmin_release(ptr %p) nounwind {
 ; LA64D-NEXT:    fmin.d $fa1, $fa1, $fs0
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
 ; LA64D-NEXT:    fst.d $fa1, $sp, 8
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 3
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
 ; LA64D-NEXT:    move $a5, $zero
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
 ; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB14_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fmin ptr %p, double 1.0 release, align 4
   ret double %v
@@ -1271,71 +1103,51 @@ define double @double_fmin_release(ptr %p) nounwind {
 define double @double_fmax_release(ptr %p) nounwind {
 ; LA64F-LABEL: double_fmax_release:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 16 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s5, $a0, 0
+; LA64F-NEXT:    ld.d $s1, $a0, 0
 ; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 8
-; LA64F-NEXT:    addi.d $s3, $sp, 0
-; LA64F-NEXT:    ori $s4, $zero, 3
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB15_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s5
+; LA64F-NEXT:    move $a0, $s1
 ; LA64F-NEXT:    move $a1, $s0
 ; LA64F-NEXT:    bl %plt(fmax)
-; LA64F-NEXT:    st.d $s5, $sp, 8
+; LA64F-NEXT:    st.d $s1, $sp, 8
 ; LA64F-NEXT:    st.d $a0, $sp, 0
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 8
+; LA64F-NEXT:    addi.d $a3, $sp, 0
+; LA64F-NEXT:    ori $a4, $zero, 3
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
 ; LA64F-NEXT:    move $a5, $zero
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s5, $sp, 8
+; LA64F-NEXT:    ld.d $s1, $sp, 8
 ; LA64F-NEXT:    beqz $a0, .LBB15_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    ld.d $s5, $sp, 16 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ld.d $s1, $sp, 16 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fmax_release:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
 ; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
 ; LA64D-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64D-NEXT:    ffint.d.l $fs0, $fa1
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 16
-; LA64D-NEXT:    addi.d $s2, $sp, 8
-; LA64D-NEXT:    ori $s3, $zero, 3
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB15_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
@@ -1343,24 +1155,20 @@ define double @double_fmax_release(ptr %p) nounwind {
 ; LA64D-NEXT:    fmax.d $fa1, $fa1, $fs0
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
 ; LA64D-NEXT:    fst.d $fa1, $sp, 8
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 3
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
 ; LA64D-NEXT:    move $a5, $zero
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
 ; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB15_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fmax ptr %p, double 1.0 release, align 4
   ret double %v
@@ -1661,101 +1469,71 @@ define float @float_fmax_acq_rel(ptr %p) nounwind {
 define double @double_fadd_acq_rel(ptr %p) nounwind {
 ; LA64F-LABEL: double_fadd_acq_rel:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -96
-; LA64F-NEXT:    st.d $ra, $sp, 88 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 80 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s6, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 16 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s6, $a0, 0
+; LA64F-NEXT:    ld.d $s1, $a0, 0
 ; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 16
-; LA64F-NEXT:    addi.d $s3, $sp, 8
-; LA64F-NEXT:    ori $s4, $zero, 4
-; LA64F-NEXT:    ori $s5, $zero, 2
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB20_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s6
+; LA64F-NEXT:    move $a0, $s1
 ; LA64F-NEXT:    move $a1, $s0
 ; LA64F-NEXT:    bl %plt(__adddf3)
-; LA64F-NEXT:    st.d $s6, $sp, 16
-; LA64F-NEXT:    st.d $a0, $sp, 8
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    st.d $s1, $sp, 8
+; LA64F-NEXT:    st.d $a0, $sp, 0
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 8
+; LA64F-NEXT:    addi.d $a3, $sp, 0
+; LA64F-NEXT:    ori $a4, $zero, 4
+; LA64F-NEXT:    ori $a5, $zero, 2
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
-; LA64F-NEXT:    move $a5, $s5
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s6, $sp, 16
+; LA64F-NEXT:    ld.d $s1, $sp, 8
 ; LA64F-NEXT:    beqz $a0, .LBB20_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s6
-; LA64F-NEXT:    ld.d $s6, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s5, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 80 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 88 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 96
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ld.d $s1, $sp, 16 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fadd_acq_rel:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64D-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
 ; LA64D-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64D-NEXT:    ffint.d.l $fs0, $fa1
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 8
-; LA64D-NEXT:    addi.d $s2, $sp, 0
-; LA64D-NEXT:    ori $s3, $zero, 4
-; LA64D-NEXT:    ori $s4, $zero, 2
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB20_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fadd.d $fa1, $fa0, $fs0
-; LA64D-NEXT:    fst.d $fa0, $sp, 8
-; LA64D-NEXT:    fst.d $fa1, $sp, 0
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    fst.d $fa0, $sp, 16
+; LA64D-NEXT:    fst.d $fa1, $sp, 8
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 4
+; LA64D-NEXT:    ori $a5, $zero, 2
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
-; LA64D-NEXT:    move $a5, $s4
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64D-NEXT:    fld.d $fa0, $sp, 8
+; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB20_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64D-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fadd ptr %p, double 1.0 acq_rel, align 4
   ret double %v
@@ -1764,101 +1542,71 @@ define double @double_fadd_acq_rel(ptr %p) nounwind {
 define double @double_fsub_acq_rel(ptr %p) nounwind {
 ; LA64F-LABEL: double_fsub_acq_rel:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -96
-; LA64F-NEXT:    st.d $ra, $sp, 88 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 80 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s6, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 16 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s6, $a0, 0
+; LA64F-NEXT:    ld.d $s1, $a0, 0
 ; LA64F-NEXT:    lu52i.d $s0, $zero, -1025
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 16
-; LA64F-NEXT:    addi.d $s3, $sp, 8
-; LA64F-NEXT:    ori $s4, $zero, 4
-; LA64F-NEXT:    ori $s5, $zero, 2
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB21_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s6
+; LA64F-NEXT:    move $a0, $s1
 ; LA64F-NEXT:    move $a1, $s0
 ; LA64F-NEXT:    bl %plt(__adddf3)
-; LA64F-NEXT:    st.d $s6, $sp, 16
-; LA64F-NEXT:    st.d $a0, $sp, 8
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    st.d $s1, $sp, 8
+; LA64F-NEXT:    st.d $a0, $sp, 0
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 8
+; LA64F-NEXT:    addi.d $a3, $sp, 0
+; LA64F-NEXT:    ori $a4, $zero, 4
+; LA64F-NEXT:    ori $a5, $zero, 2
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
-; LA64F-NEXT:    move $a5, $s5
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s6, $sp, 16
+; LA64F-NEXT:    ld.d $s1, $sp, 8
 ; LA64F-NEXT:    beqz $a0, .LBB21_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s6
-; LA64F-NEXT:    ld.d $s6, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s5, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 80 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 88 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 96
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ld.d $s1, $sp, 16 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fsub_acq_rel:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64D-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI21_0)
 ; LA64D-NEXT:    addi.d $a0, $a0, %pc_lo12(.LCPI21_0)
 ; LA64D-NEXT:    fld.d $fs0, $a0, 0
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 8
-; LA64D-NEXT:    addi.d $s2, $sp, 0
-; LA64D-NEXT:    ori $s3, $zero, 4
-; LA64D-NEXT:    ori $s4, $zero, 2
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB21_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fadd.d $fa1, $fa0, $fs0
-; LA64D-NEXT:    fst.d $fa0, $sp, 8
-; LA64D-NEXT:    fst.d $fa1, $sp, 0
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    fst.d $fa0, $sp, 16
+; LA64D-NEXT:    fst.d $fa1, $sp, 8
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 4
+; LA64D-NEXT:    ori $a5, $zero, 2
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
-; LA64D-NEXT:    move $a5, $s4
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64D-NEXT:    fld.d $fa0, $sp, 8
+; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB21_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64D-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fsub ptr %p, double 1.0 acq_rel, align 4
   ret double %v
@@ -1867,102 +1615,72 @@ define double @double_fsub_acq_rel(ptr %p) nounwind {
 define double @double_fmin_acq_rel(ptr %p) nounwind {
 ; LA64F-LABEL: double_fmin_acq_rel:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -96
-; LA64F-NEXT:    st.d $ra, $sp, 88 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 80 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s6, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 16 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s6, $a0, 0
+; LA64F-NEXT:    ld.d $s1, $a0, 0
 ; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 16
-; LA64F-NEXT:    addi.d $s3, $sp, 8
-; LA64F-NEXT:    ori $s4, $zero, 4
-; LA64F-NEXT:    ori $s5, $zero, 2
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB22_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s6
+; LA64F-NEXT:    move $a0, $s1
 ; LA64F-NEXT:    move $a1, $s0
 ; LA64F-NEXT:    bl %plt(fmin)
-; LA64F-NEXT:    st.d $s6, $sp, 16
-; LA64F-NEXT:    st.d $a0, $sp, 8
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    st.d $s1, $sp, 8
+; LA64F-NEXT:    st.d $a0, $sp, 0
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 8
+; LA64F-NEXT:    addi.d $a3, $sp, 0
+; LA64F-NEXT:    ori $a4, $zero, 4
+; LA64F-NEXT:    ori $a5, $zero, 2
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
-; LA64F-NEXT:    move $a5, $s5
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s6, $sp, 16
+; LA64F-NEXT:    ld.d $s1, $sp, 8
 ; LA64F-NEXT:    beqz $a0, .LBB22_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s6
-; LA64F-NEXT:    ld.d $s6, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s5, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 80 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 88 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 96
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ld.d $s1, $sp, 16 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fmin_acq_rel:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64D-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
 ; LA64D-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64D-NEXT:    ffint.d.l $fs0, $fa1
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 8
-; LA64D-NEXT:    addi.d $s2, $sp, 0
-; LA64D-NEXT:    ori $s3, $zero, 4
-; LA64D-NEXT:    ori $s4, $zero, 2
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB22_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fmax.d $fa1, $fa0, $fa0
 ; LA64D-NEXT:    fmin.d $fa1, $fa1, $fs0
-; LA64D-NEXT:    fst.d $fa0, $sp, 8
-; LA64D-NEXT:    fst.d $fa1, $sp, 0
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    fst.d $fa0, $sp, 16
+; LA64D-NEXT:    fst.d $fa1, $sp, 8
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 4
+; LA64D-NEXT:    ori $a5, $zero, 2
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
-; LA64D-NEXT:    move $a5, $s4
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64D-NEXT:    fld.d $fa0, $sp, 8
+; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB22_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64D-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fmin ptr %p, double 1.0 acq_rel, align 4
   ret double %v
@@ -1971,102 +1689,72 @@ define double @double_fmin_acq_rel(ptr %p) nounwind {
 define double @double_fmax_acq_rel(ptr %p) nounwind {
 ; LA64F-LABEL: double_fmax_acq_rel:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -96
-; LA64F-NEXT:    st.d $ra, $sp, 88 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 80 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s6, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 16 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s6, $a0, 0
+; LA64F-NEXT:    ld.d $s1, $a0, 0
 ; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 16
-; LA64F-NEXT:    addi.d $s3, $sp, 8
-; LA64F-NEXT:    ori $s4, $zero, 4
-; LA64F-NEXT:    ori $s5, $zero, 2
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB23_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s6
+; LA64F-NEXT:    move $a0, $s1
 ; LA64F-NEXT:    move $a1, $s0
 ; LA64F-NEXT:    bl %plt(fmax)
-; LA64F-NEXT:    st.d $s6, $sp, 16
-; LA64F-NEXT:    st.d $a0, $sp, 8
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    st.d $s1, $sp, 8
+; LA64F-NEXT:    st.d $a0, $sp, 0
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 8
+; LA64F-NEXT:    addi.d $a3, $sp, 0
+; LA64F-NEXT:    ori $a4, $zero, 4
+; LA64F-NEXT:    ori $a5, $zero, 2
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
-; LA64F-NEXT:    move $a5, $s5
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s6, $sp, 16
+; LA64F-NEXT:    ld.d $s1, $sp, 8
 ; LA64F-NEXT:    beqz $a0, .LBB23_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s6
-; LA64F-NEXT:    ld.d $s6, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s5, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 80 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 88 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 96
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ld.d $s1, $sp, 16 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fmax_acq_rel:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64D-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
 ; LA64D-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64D-NEXT:    ffint.d.l $fs0, $fa1
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 8
-; LA64D-NEXT:    addi.d $s2, $sp, 0
-; LA64D-NEXT:    ori $s3, $zero, 4
-; LA64D-NEXT:    ori $s4, $zero, 2
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB23_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fmax.d $fa1, $fa0, $fa0
 ; LA64D-NEXT:    fmax.d $fa1, $fa1, $fs0
-; LA64D-NEXT:    fst.d $fa0, $sp, 8
-; LA64D-NEXT:    fst.d $fa1, $sp, 0
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    fst.d $fa0, $sp, 16
+; LA64D-NEXT:    fst.d $fa1, $sp, 8
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 4
+; LA64D-NEXT:    ori $a5, $zero, 2
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
-; LA64D-NEXT:    move $a5, $s4
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64D-NEXT:    fld.d $fa0, $sp, 8
+; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB23_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64D-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fmax ptr %p, double 1.0 acq_rel, align 4
   ret double %v
@@ -2367,95 +2055,71 @@ define float @float_fmax_seq_cst(ptr %p) nounwind {
 define double @double_fadd_seq_cst(ptr %p) nounwind {
 ; LA64F-LABEL: double_fadd_seq_cst:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 16 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s5, $a0, 0
+; LA64F-NEXT:    ld.d $s1, $a0, 0
 ; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 8
-; LA64F-NEXT:    addi.d $s3, $sp, 0
-; LA64F-NEXT:    ori $s4, $zero, 5
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB28_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s5
+; LA64F-NEXT:    move $a0, $s1
 ; LA64F-NEXT:    move $a1, $s0
 ; LA64F-NEXT:    bl %plt(__adddf3)
-; LA64F-NEXT:    st.d $s5, $sp, 8
+; LA64F-NEXT:    st.d $s1, $sp, 8
 ; LA64F-NEXT:    st.d $a0, $sp, 0
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 8
+; LA64F-NEXT:    addi.d $a3, $sp, 0
+; LA64F-NEXT:    ori $a4, $zero, 5
+; LA64F-NEXT:    ori $a5, $zero, 5
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
-; LA64F-NEXT:    move $a5, $s4
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s5, $sp, 8
+; LA64F-NEXT:    ld.d $s1, $sp, 8
 ; LA64F-NEXT:    beqz $a0, .LBB28_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    ld.d $s5, $sp, 16 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ld.d $s1, $sp, 16 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fadd_seq_cst:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
 ; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
 ; LA64D-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64D-NEXT:    ffint.d.l $fs0, $fa1
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 16
-; LA64D-NEXT:    addi.d $s2, $sp, 8
-; LA64D-NEXT:    ori $s3, $zero, 5
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB28_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fadd.d $fa1, $fa0, $fs0
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
 ; LA64D-NEXT:    fst.d $fa1, $sp, 8
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 5
+; LA64D-NEXT:    ori $a5, $zero, 5
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
-; LA64D-NEXT:    move $a5, $s3
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
 ; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB28_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fadd ptr %p, double 1.0 seq_cst, align 4
   ret double %v
@@ -2464,95 +2128,71 @@ define double @double_fadd_seq_cst(ptr %p) nounwind {
 define double @double_fsub_seq_cst(ptr %p) nounwind {
 ; LA64F-LABEL: double_fsub_seq_cst:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 16 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s5, $a0, 0
+; LA64F-NEXT:    ld.d $s1, $a0, 0
 ; LA64F-NEXT:    lu52i.d $s0, $zero, -1025
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 8
-; LA64F-NEXT:    addi.d $s3, $sp, 0
-; LA64F-NEXT:    ori $s4, $zero, 5
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB29_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s5
+; LA64F-NEXT:    move $a0, $s1
 ; LA64F-NEXT:    move $a1, $s0
 ; LA64F-NEXT:    bl %plt(__adddf3)
-; LA64F-NEXT:    st.d $s5, $sp, 8
+; LA64F-NEXT:    st.d $s1, $sp, 8
 ; LA64F-NEXT:    st.d $a0, $sp, 0
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 8
+; LA64F-NEXT:    addi.d $a3, $sp, 0
+; LA64F-NEXT:    ori $a4, $zero, 5
+; LA64F-NEXT:    ori $a5, $zero, 5
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
-; LA64F-NEXT:    move $a5, $s4
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s5, $sp, 8
+; LA64F-NEXT:    ld.d $s1, $sp, 8
 ; LA64F-NEXT:    beqz $a0, .LBB29_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    ld.d $s5, $sp, 16 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ld.d $s1, $sp, 16 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fsub_seq_cst:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
 ; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI29_0)
 ; LA64D-NEXT:    addi.d $a0, $a0, %pc_lo12(.LCPI29_0)
 ; LA64D-NEXT:    fld.d $fs0, $a0, 0
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 16
-; LA64D-NEXT:    addi.d $s2, $sp, 8
-; LA64D-NEXT:    ori $s3, $zero, 5
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB29_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fadd.d $fa1, $fa0, $fs0
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
 ; LA64D-NEXT:    fst.d $fa1, $sp, 8
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 5
+; LA64D-NEXT:    ori $a5, $zero, 5
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
-; LA64D-NEXT:    move $a5, $s3
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
 ; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB29_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fsub ptr %p, double 1.0 seq_cst, align 4
   ret double %v
@@ -2561,71 +2201,51 @@ define double @double_fsub_seq_cst(ptr %p) nounwind {
 define double @double_fmin_seq_cst(ptr %p) nounwind {
 ; LA64F-LABEL: double_fmin_seq_cst:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 16 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s5, $a0, 0
+; LA64F-NEXT:    ld.d $s1, $a0, 0
 ; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 8
-; LA64F-NEXT:    addi.d $s3, $sp, 0
-; LA64F-NEXT:    ori $s4, $zero, 5
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB30_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s5
+; LA64F-NEXT:    move $a0, $s1
 ; LA64F-NEXT:    move $a1, $s0
 ; LA64F-NEXT:    bl %plt(fmin)
-; LA64F-NEXT:    st.d $s5, $sp, 8
+; LA64F-NEXT:    st.d $s1, $sp, 8
 ; LA64F-NEXT:    st.d $a0, $sp, 0
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 8
+; LA64F-NEXT:    addi.d $a3, $sp, 0
+; LA64F-NEXT:    ori $a4, $zero, 5
+; LA64F-NEXT:    ori $a5, $zero, 5
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
-; LA64F-NEXT:    move $a5, $s4
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s5, $sp, 8
+; LA64F-NEXT:    ld.d $s1, $sp, 8
 ; LA64F-NEXT:    beqz $a0, .LBB30_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    ld.d $s5, $sp, 16 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ld.d $s1, $sp, 16 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fmin_seq_cst:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
 ; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
 ; LA64D-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64D-NEXT:    ffint.d.l $fs0, $fa1
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 16
-; LA64D-NEXT:    addi.d $s2, $sp, 8
-; LA64D-NEXT:    ori $s3, $zero, 5
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB30_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
@@ -2633,24 +2253,20 @@ define double @double_fmin_seq_cst(ptr %p) nounwind {
 ; LA64D-NEXT:    fmin.d $fa1, $fa1, $fs0
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
 ; LA64D-NEXT:    fst.d $fa1, $sp, 8
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 5
+; LA64D-NEXT:    ori $a5, $zero, 5
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
-; LA64D-NEXT:    move $a5, $s3
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
 ; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB30_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fmin ptr %p, double 1.0 seq_cst, align 4
   ret double %v
@@ -2659,71 +2275,51 @@ define double @double_fmin_seq_cst(ptr %p) nounwind {
 define double @double_fmax_seq_cst(ptr %p) nounwind {
 ; LA64F-LABEL: double_fmax_seq_cst:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 16 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s5, $a0, 0
+; LA64F-NEXT:    ld.d $s1, $a0, 0
 ; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 8
-; LA64F-NEXT:    addi.d $s3, $sp, 0
-; LA64F-NEXT:    ori $s4, $zero, 5
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB31_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s5
+; LA64F-NEXT:    move $a0, $s1
 ; LA64F-NEXT:    move $a1, $s0
 ; LA64F-NEXT:    bl %plt(fmax)
-; LA64F-NEXT:    st.d $s5, $sp, 8
+; LA64F-NEXT:    st.d $s1, $sp, 8
 ; LA64F-NEXT:    st.d $a0, $sp, 0
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 8
+; LA64F-NEXT:    addi.d $a3, $sp, 0
+; LA64F-NEXT:    ori $a4, $zero, 5
+; LA64F-NEXT:    ori $a5, $zero, 5
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
-; LA64F-NEXT:    move $a5, $s4
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s5, $sp, 8
+; LA64F-NEXT:    ld.d $s1, $sp, 8
 ; LA64F-NEXT:    beqz $a0, .LBB31_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    ld.d $s5, $sp, 16 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ld.d $s1, $sp, 16 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fmax_seq_cst:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
 ; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
 ; LA64D-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64D-NEXT:    ffint.d.l $fs0, $fa1
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 16
-; LA64D-NEXT:    addi.d $s2, $sp, 8
-; LA64D-NEXT:    ori $s3, $zero, 5
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB31_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
@@ -2731,24 +2327,20 @@ define double @double_fmax_seq_cst(ptr %p) nounwind {
 ; LA64D-NEXT:    fmax.d $fa1, $fa1, $fs0
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
 ; LA64D-NEXT:    fst.d $fa1, $sp, 8
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 5
+; LA64D-NEXT:    ori $a5, $zero, 5
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
-; LA64D-NEXT:    move $a5, $s3
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
 ; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB31_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fmax ptr %p, double 1.0 seq_cst, align 4
   ret double %v
@@ -3049,89 +2641,71 @@ define float @float_fmax_monotonic(ptr %p) nounwind {
 define double @double_fadd_monotonic(ptr %p) nounwind {
 ; LA64F-LABEL: double_fadd_monotonic:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 16 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s4, $a0, 0
+; LA64F-NEXT:    ld.d $s1, $a0, 0
 ; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 16
-; LA64F-NEXT:    addi.d $s3, $sp, 8
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB36_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s4
+; LA64F-NEXT:    move $a0, $s1
 ; LA64F-NEXT:    move $a1, $s0
 ; LA64F-NEXT:    bl %plt(__adddf3)
-; LA64F-NEXT:    st.d $s4, $sp, 16
-; LA64F-NEXT:    st.d $a0, $sp, 8
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    st.d $s1, $sp, 8
+; LA64F-NEXT:    st.d $a0, $sp, 0
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 8
+; LA64F-NEXT:    addi.d $a3, $sp, 0
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
 ; LA64F-NEXT:    move $a4, $zero
 ; LA64F-NEXT:    move $a5, $zero
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s4, $sp, 16
+; LA64F-NEXT:    ld.d $s1, $sp, 8
 ; LA64F-NEXT:    beqz $a0, .LBB36_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s4
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ld.d $s1, $sp, 16 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fadd_monotonic:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -64
-; LA64D-NEXT:    st.d $ra, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 32 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 24 # 8-byte Folded Spill
-; LA64D-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
 ; LA64D-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64D-NEXT:    ffint.d.l $fs0, $fa1
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 8
-; LA64D-NEXT:    addi.d $s2, $sp, 0
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB36_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fadd.d $fa1, $fa0, $fs0
-; LA64D-NEXT:    fst.d $fa0, $sp, 8
-; LA64D-NEXT:    fst.d $fa1, $sp, 0
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    fst.d $fa0, $sp, 16
+; LA64D-NEXT:    fst.d $fa1, $sp, 8
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
 ; LA64D-NEXT:    move $a4, $zero
 ; LA64D-NEXT:    move $a5, $zero
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64D-NEXT:    fld.d $fa0, $sp, 8
+; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB36_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64D-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 64
+; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fadd ptr %p, double 1.0 monotonic, align 4
   ret double %v
@@ -3140,89 +2714,71 @@ define double @double_fadd_monotonic(ptr %p) nounwind {
 define double @double_fsub_monotonic(ptr %p) nounwind {
 ; LA64F-LABEL: double_fsub_monotonic:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 16 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s4, $a0, 0
+; LA64F-NEXT:    ld.d $s1, $a0, 0
 ; LA64F-NEXT:    lu52i.d $s0, $zero, -1025
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 16
-; LA64F-NEXT:    addi.d $s3, $sp, 8
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB37_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s4
+; LA64F-NEXT:    move $a0, $s1
 ; LA64F-NEXT:    move $a1, $s0
 ; LA64F-NEXT:    bl %plt(__adddf3)
-; LA64F-NEXT:    st.d $s4, $sp, 16
-; LA64F-NEXT:    st.d $a0, $sp, 8
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    st.d $s1, $sp, 8
+; LA64F-NEXT:    st.d $a0, $sp, 0
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 8
+; LA64F-NEXT:    addi.d $a3, $sp, 0
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
 ; LA64F-NEXT:    move $a4, $zero
 ; LA64F-NEXT:    move $a5, $zero
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s4, $sp, 16
+; LA64F-NEXT:    ld.d $s1, $sp, 8
 ; LA64F-NEXT:    beqz $a0, .LBB37_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s4
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ld.d $s1, $sp, 16 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fsub_monotonic:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -64
-; LA64D-NEXT:    st.d $ra, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 32 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 24 # 8-byte Folded Spill
-; LA64D-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI37_0)
 ; LA64D-NEXT:    addi.d $a0, $a0, %pc_lo12(.LCPI37_0)
 ; LA64D-NEXT:    fld.d $fs0, $a0, 0
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 8
-; LA64D-NEXT:    addi.d $s2, $sp, 0
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB37_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fadd.d $fa1, $fa0, $fs0
-; LA64D-NEXT:    fst.d $fa0, $sp, 8
-; LA64D-NEXT:    fst.d $fa1, $sp, 0
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    fst.d $fa0, $sp, 16
+; LA64D-NEXT:    fst.d $fa1, $sp, 8
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
 ; LA64D-NEXT:    move $a4, $zero
 ; LA64D-NEXT:    move $a5, $zero
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64D-NEXT:    fld.d $fa0, $sp, 8
+; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB37_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64D-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 64
+; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fsub ptr %p, double 1.0 monotonic, align 4
   ret double %v
@@ -3231,90 +2787,72 @@ define double @double_fsub_monotonic(ptr %p) nounwind {
 define double @double_fmin_monotonic(ptr %p) nounwind {
 ; LA64F-LABEL: double_fmin_monotonic:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 16 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s4, $a0, 0
+; LA64F-NEXT:    ld.d $s1, $a0, 0
 ; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 16
-; LA64F-NEXT:    addi.d $s3, $sp, 8
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB38_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s4
+; LA64F-NEXT:    move $a0, $s1
 ; LA64F-NEXT:    move $a1, $s0
 ; LA64F-NEXT:    bl %plt(fmin)
-; LA64F-NEXT:    st.d $s4, $sp, 16
-; LA64F-NEXT:    st.d $a0, $sp, 8
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    st.d $s1, $sp, 8
+; LA64F-NEXT:    st.d $a0, $sp, 0
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 8
+; LA64F-NEXT:    addi.d $a3, $sp, 0
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
 ; LA64F-NEXT:    move $a4, $zero
 ; LA64F-NEXT:    move $a5, $zero
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s4, $sp, 16
+; LA64F-NEXT:    ld.d $s1, $sp, 8
 ; LA64F-NEXT:    beqz $a0, .LBB38_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s4
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ld.d $s1, $sp, 16 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fmin_monotonic:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -64
-; LA64D-NEXT:    st.d $ra, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 32 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 24 # 8-byte Folded Spill
-; LA64D-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
 ; LA64D-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64D-NEXT:    ffint.d.l $fs0, $fa1
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 8
-; LA64D-NEXT:    addi.d $s2, $sp, 0
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB38_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fmax.d $fa1, $fa0, $fa0
 ; LA64D-NEXT:    fmin.d $fa1, $fa1, $fs0
-; LA64D-NEXT:    fst.d $fa0, $sp, 8
-; LA64D-NEXT:    fst.d $fa1, $sp, 0
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    fst.d $fa0, $sp, 16
+; LA64D-NEXT:    fst.d $fa1, $sp, 8
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
 ; LA64D-NEXT:    move $a4, $zero
 ; LA64D-NEXT:    move $a5, $zero
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64D-NEXT:    fld.d $fa0, $sp, 8
+; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB38_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64D-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 64
+; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fmin ptr %p, double 1.0 monotonic, align 4
   ret double %v
@@ -3323,90 +2861,72 @@ define double @double_fmin_monotonic(ptr %p) nounwind {
 define double @double_fmax_monotonic(ptr %p) nounwind {
 ; LA64F-LABEL: double_fmax_monotonic:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 16 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s4, $a0, 0
+; LA64F-NEXT:    ld.d $s1, $a0, 0
 ; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 16
-; LA64F-NEXT:    addi.d $s3, $sp, 8
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB39_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s4
+; LA64F-NEXT:    move $a0, $s1
 ; LA64F-NEXT:    move $a1, $s0
 ; LA64F-NEXT:    bl %plt(fmax)
-; LA64F-NEXT:    st.d $s4, $sp, 16
-; LA64F-NEXT:    st.d $a0, $sp, 8
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    st.d $s1, $sp, 8
+; LA64F-NEXT:    st.d $a0, $sp, 0
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 8
+; LA64F-NEXT:    addi.d $a3, $sp, 0
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
 ; LA64F-NEXT:    move $a4, $zero
 ; LA64F-NEXT:    move $a5, $zero
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s4, $sp, 16
+; LA64F-NEXT:    ld.d $s1, $sp, 8
 ; LA64F-NEXT:    beqz $a0, .LBB39_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s4
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ld.d $s1, $sp, 16 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fmax_monotonic:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -64
-; LA64D-NEXT:    st.d $ra, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 32 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 24 # 8-byte Folded Spill
-; LA64D-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
 ; LA64D-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64D-NEXT:    ffint.d.l $fs0, $fa1
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 8
-; LA64D-NEXT:    addi.d $s2, $sp, 0
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB39_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fmax.d $fa1, $fa0, $fa0
 ; LA64D-NEXT:    fmax.d $fa1, $fa1, $fs0
-; LA64D-NEXT:    fst.d $fa0, $sp, 8
-; LA64D-NEXT:    fst.d $fa1, $sp, 0
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    fst.d $fa0, $sp, 16
+; LA64D-NEXT:    fst.d $fa1, $sp, 8
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
 ; LA64D-NEXT:    move $a4, $zero
 ; LA64D-NEXT:    move $a5, $zero
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64D-NEXT:    fld.d $fa0, $sp, 8
+; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB39_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64D-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 64
+; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fmax ptr %p, double 1.0 monotonic, align 4
   ret double %v

diff  --git a/llvm/test/CodeGen/LoongArch/jump-table.ll b/llvm/test/CodeGen/LoongArch/jump-table.ll
index bb3d49157ac6a..51f1ee6da0b85 100644
--- a/llvm/test/CodeGen/LoongArch/jump-table.ll
+++ b/llvm/test/CodeGen/LoongArch/jump-table.ll
@@ -19,31 +19,31 @@ define void @switch_4_arms(i32 %in, ptr %out) nounwind {
 ; LA32-NEXT:    ori $a2, $zero, 2
 ; LA32-NEXT:    blt $a2, $a0, .LBB0_4
 ; LA32-NEXT:  # %bb.1: # %entry
-; LA32-NEXT:    ori $a3, $zero, 1
-; LA32-NEXT:    beq $a0, $a3, .LBB0_7
+; LA32-NEXT:    ori $a2, $zero, 1
+; LA32-NEXT:    beq $a0, $a2, .LBB0_7
 ; LA32-NEXT:  # %bb.2: # %entry
-; LA32-NEXT:    bne $a0, $a2, .LBB0_9
+; LA32-NEXT:    ori $a2, $zero, 2
+; LA32-NEXT:    bne $a0, $a2, .LBB0_10
 ; LA32-NEXT:  # %bb.3: # %bb2
 ; LA32-NEXT:    ori $a0, $zero, 3
-; LA32-NEXT:    st.w $a0, $a1, 0
-; LA32-NEXT:    ret
+; LA32-NEXT:    b .LBB0_9
 ; LA32-NEXT:  .LBB0_4: # %entry
-; LA32-NEXT:    ori $a3, $zero, 3
-; LA32-NEXT:    beq $a0, $a3, .LBB0_8
+; LA32-NEXT:    ori $a2, $zero, 3
+; LA32-NEXT:    beq $a0, $a2, .LBB0_8
 ; LA32-NEXT:  # %bb.5: # %entry
 ; LA32-NEXT:    ori $a2, $zero, 4
-; LA32-NEXT:    bne $a0, $a2, .LBB0_9
+; LA32-NEXT:    bne $a0, $a2, .LBB0_10
 ; LA32-NEXT:  # %bb.6: # %bb4
 ; LA32-NEXT:    ori $a0, $zero, 1
-; LA32-NEXT:    st.w $a0, $a1, 0
-; LA32-NEXT:    ret
+; LA32-NEXT:    b .LBB0_9
 ; LA32-NEXT:  .LBB0_7: # %bb1
 ; LA32-NEXT:    ori $a0, $zero, 4
-; LA32-NEXT:    st.w $a0, $a1, 0
-; LA32-NEXT:    ret
+; LA32-NEXT:    b .LBB0_9
 ; LA32-NEXT:  .LBB0_8: # %bb3
-; LA32-NEXT:    st.w $a2, $a1, 0
+; LA32-NEXT:    ori $a0, $zero, 2
 ; LA32-NEXT:  .LBB0_9: # %exit
+; LA32-NEXT:    st.w $a0, $a1, 0
+; LA32-NEXT:  .LBB0_10: # %exit
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: switch_4_arms:
@@ -52,80 +52,86 @@ define void @switch_4_arms(i32 %in, ptr %out) nounwind {
 ; LA64-NEXT:    ori $a2, $zero, 2
 ; LA64-NEXT:    blt $a2, $a0, .LBB0_4
 ; LA64-NEXT:  # %bb.1: # %entry
-; LA64-NEXT:    ori $a3, $zero, 1
-; LA64-NEXT:    beq $a0, $a3, .LBB0_7
+; LA64-NEXT:    ori $a2, $zero, 1
+; LA64-NEXT:    beq $a0, $a2, .LBB0_7
 ; LA64-NEXT:  # %bb.2: # %entry
-; LA64-NEXT:    bne $a0, $a2, .LBB0_9
+; LA64-NEXT:    ori $a2, $zero, 2
+; LA64-NEXT:    bne $a0, $a2, .LBB0_10
 ; LA64-NEXT:  # %bb.3: # %bb2
 ; LA64-NEXT:    ori $a0, $zero, 3
-; LA64-NEXT:    st.w $a0, $a1, 0
-; LA64-NEXT:    ret
+; LA64-NEXT:    b .LBB0_9
 ; LA64-NEXT:  .LBB0_4: # %entry
-; LA64-NEXT:    ori $a3, $zero, 3
-; LA64-NEXT:    beq $a0, $a3, .LBB0_8
+; LA64-NEXT:    ori $a2, $zero, 3
+; LA64-NEXT:    beq $a0, $a2, .LBB0_8
 ; LA64-NEXT:  # %bb.5: # %entry
 ; LA64-NEXT:    ori $a2, $zero, 4
-; LA64-NEXT:    bne $a0, $a2, .LBB0_9
+; LA64-NEXT:    bne $a0, $a2, .LBB0_10
 ; LA64-NEXT:  # %bb.6: # %bb4
 ; LA64-NEXT:    ori $a0, $zero, 1
-; LA64-NEXT:    st.w $a0, $a1, 0
-; LA64-NEXT:    ret
+; LA64-NEXT:    b .LBB0_9
 ; LA64-NEXT:  .LBB0_7: # %bb1
 ; LA64-NEXT:    ori $a0, $zero, 4
-; LA64-NEXT:    st.w $a0, $a1, 0
-; LA64-NEXT:    ret
+; LA64-NEXT:    b .LBB0_9
 ; LA64-NEXT:  .LBB0_8: # %bb3
-; LA64-NEXT:    st.w $a2, $a1, 0
+; LA64-NEXT:    ori $a0, $zero, 2
 ; LA64-NEXT:  .LBB0_9: # %exit
+; LA64-NEXT:    st.w $a0, $a1, 0
+; LA64-NEXT:  .LBB0_10: # %exit
 ; LA64-NEXT:    ret
 ;
 ; LA32-JT-LABEL: switch_4_arms:
 ; LA32-JT:       # %bb.0: # %entry
-; LA32-JT-NEXT:    addi.w $a2, $a0, -1
-; LA32-JT-NEXT:    ori $a0, $zero, 3
-; LA32-JT-NEXT:    bltu $a0, $a2, .LBB0_6
+; LA32-JT-NEXT:    addi.w $a0, $a0, -1
+; LA32-JT-NEXT:    ori $a2, $zero, 3
+; LA32-JT-NEXT:    bltu $a2, $a0, .LBB0_7
 ; LA32-JT-NEXT:  # %bb.1: # %entry
-; LA32-JT-NEXT:    pcalau12i $a3, %pc_hi20(.LJTI0_0)
-; LA32-JT-NEXT:    addi.w $a3, $a3, %pc_lo12(.LJTI0_0)
-; LA32-JT-NEXT:    alsl.w $a2, $a2, $a3, 2
-; LA32-JT-NEXT:    ld.w $a2, $a2, 0
-; LA32-JT-NEXT:    jr $a2
+; LA32-JT-NEXT:    pcalau12i $a2, %pc_hi20(.LJTI0_0)
+; LA32-JT-NEXT:    addi.w $a2, $a2, %pc_lo12(.LJTI0_0)
+; LA32-JT-NEXT:    alsl.w $a0, $a0, $a2, 2
+; LA32-JT-NEXT:    ld.w $a0, $a0, 0
+; LA32-JT-NEXT:    jr $a0
 ; LA32-JT-NEXT:  .LBB0_2: # %bb1
 ; LA32-JT-NEXT:    ori $a0, $zero, 4
-; LA32-JT-NEXT:    b .LBB0_5
+; LA32-JT-NEXT:    b .LBB0_6
 ; LA32-JT-NEXT:  .LBB0_3: # %bb3
 ; LA32-JT-NEXT:    ori $a0, $zero, 2
-; LA32-JT-NEXT:    b .LBB0_5
+; LA32-JT-NEXT:    b .LBB0_6
 ; LA32-JT-NEXT:  .LBB0_4: # %bb4
 ; LA32-JT-NEXT:    ori $a0, $zero, 1
-; LA32-JT-NEXT:  .LBB0_5: # %exit
-; LA32-JT-NEXT:    st.w $a0, $a1, 0
+; LA32-JT-NEXT:    b .LBB0_6
+; LA32-JT-NEXT:  .LBB0_5: # %bb2
+; LA32-JT-NEXT:    ori $a0, $zero, 3
 ; LA32-JT-NEXT:  .LBB0_6: # %exit
+; LA32-JT-NEXT:    st.w $a0, $a1, 0
+; LA32-JT-NEXT:  .LBB0_7: # %exit
 ; LA32-JT-NEXT:    ret
 ;
 ; LA64-JT-LABEL: switch_4_arms:
 ; LA64-JT:       # %bb.0: # %entry
 ; LA64-JT-NEXT:    addi.w $a0, $a0, 0
-; LA64-JT-NEXT:    addi.d $a2, $a0, -1
-; LA64-JT-NEXT:    ori $a0, $zero, 3
-; LA64-JT-NEXT:    bltu $a0, $a2, .LBB0_6
+; LA64-JT-NEXT:    addi.d $a0, $a0, -1
+; LA64-JT-NEXT:    ori $a2, $zero, 3
+; LA64-JT-NEXT:    bltu $a2, $a0, .LBB0_7
 ; LA64-JT-NEXT:  # %bb.1: # %entry
-; LA64-JT-NEXT:    slli.d $a2, $a2, 3
-; LA64-JT-NEXT:    pcalau12i $a3, %pc_hi20(.LJTI0_0)
-; LA64-JT-NEXT:    addi.d $a3, $a3, %pc_lo12(.LJTI0_0)
-; LA64-JT-NEXT:    ldx.d $a2, $a2, $a3
-; LA64-JT-NEXT:    jr $a2
+; LA64-JT-NEXT:    slli.d $a0, $a0, 3
+; LA64-JT-NEXT:    pcalau12i $a2, %pc_hi20(.LJTI0_0)
+; LA64-JT-NEXT:    addi.d $a2, $a2, %pc_lo12(.LJTI0_0)
+; LA64-JT-NEXT:    ldx.d $a0, $a0, $a2
+; LA64-JT-NEXT:    jr $a0
 ; LA64-JT-NEXT:  .LBB0_2: # %bb1
 ; LA64-JT-NEXT:    ori $a0, $zero, 4
-; LA64-JT-NEXT:    b .LBB0_5
+; LA64-JT-NEXT:    b .LBB0_6
 ; LA64-JT-NEXT:  .LBB0_3: # %bb3
 ; LA64-JT-NEXT:    ori $a0, $zero, 2
-; LA64-JT-NEXT:    b .LBB0_5
+; LA64-JT-NEXT:    b .LBB0_6
 ; LA64-JT-NEXT:  .LBB0_4: # %bb4
 ; LA64-JT-NEXT:    ori $a0, $zero, 1
-; LA64-JT-NEXT:  .LBB0_5: # %exit
-; LA64-JT-NEXT:    st.w $a0, $a1, 0
+; LA64-JT-NEXT:    b .LBB0_6
+; LA64-JT-NEXT:  .LBB0_5: # %bb2
+; LA64-JT-NEXT:    ori $a0, $zero, 3
 ; LA64-JT-NEXT:  .LBB0_6: # %exit
+; LA64-JT-NEXT:    st.w $a0, $a1, 0
+; LA64-JT-NEXT:  .LBB0_7: # %exit
 ; LA64-JT-NEXT:    ret
 entry:
   switch i32 %in, label %exit [

diff  --git a/llvm/test/CodeGen/LoongArch/register-coalescer-crash-pr79718.mir b/llvm/test/CodeGen/LoongArch/register-coalescer-crash-pr79718.mir
index 9c62983c6d450..be62b73b14c3f 100644
--- a/llvm/test/CodeGen/LoongArch/register-coalescer-crash-pr79718.mir
+++ b/llvm/test/CodeGen/LoongArch/register-coalescer-crash-pr79718.mir
@@ -78,7 +78,7 @@ body:             |
   ; CHECK-NEXT:   successors: %bb.11(0x80000000)
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:gpr = ADDI_D [[COPY6]], 1
-  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:gpr = COPY [[ORI]]
+  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:gpr = ORI $r0, 1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.11:
   ; CHECK-NEXT:   successors: %bb.12(0x80000000)

diff  --git a/llvm/test/CodeGen/LoongArch/sextw-removal.ll b/llvm/test/CodeGen/LoongArch/sextw-removal.ll
index ba889fc8393b4..2bb39395c1d1b 100644
--- a/llvm/test/CodeGen/LoongArch/sextw-removal.ll
+++ b/llvm/test/CodeGen/LoongArch/sextw-removal.ll
@@ -825,22 +825,22 @@ define signext i32 @test14b(i32 %0, i32 signext %1) {
 ; CHECK-NEXT:    ori $a2, $zero, 2
 ; CHECK-NEXT:    blt $a1, $a2, .LBB14_4
 ; CHECK-NEXT:  # %bb.1: # %.preheader
-; CHECK-NEXT:    ori $a3, $zero, 1
-; CHECK-NEXT:    addi.d $a2, $zero, -1
-; CHECK-NEXT:    ori $a4, $zero, 1000
+; CHECK-NEXT:    ori $a2, $zero, 1
+; CHECK-NEXT:    ori $a3, $zero, 1000
 ; CHECK-NEXT:    .p2align 4, , 16
 ; CHECK-NEXT:  .LBB14_2: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    addi.w $a5, $a0, 0
-; CHECK-NEXT:    blt $a4, $a5, .LBB14_5
+; CHECK-NEXT:    addi.w $a4, $a0, 0
+; CHECK-NEXT:    blt $a3, $a4, .LBB14_5
 ; CHECK-NEXT:  # %bb.3: # in Loop: Header=BB14_2 Depth=1
-; CHECK-NEXT:    add.d $a0, $a3, $a0
-; CHECK-NEXT:    addi.w $a3, $a3, 1
-; CHECK-NEXT:    blt $a3, $a1, .LBB14_2
+; CHECK-NEXT:    add.d $a0, $a2, $a0
+; CHECK-NEXT:    addi.w $a2, $a2, 1
+; CHECK-NEXT:    blt $a2, $a1, .LBB14_2
 ; CHECK-NEXT:  .LBB14_4:
 ; CHECK-NEXT:    addi.w $a0, $a0, 0
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB14_5:
-; CHECK-NEXT:    addi.w $a0, $a2, 0
+; CHECK-NEXT:    addi.d $a0, $zero, -1
+; CHECK-NEXT:    addi.w $a0, $a0, 0
 ; CHECK-NEXT:    ret
 ;
 ; NORMV-LABEL: test14b:
@@ -848,24 +848,24 @@ define signext i32 @test14b(i32 %0, i32 signext %1) {
 ; NORMV-NEXT:    ori $a2, $zero, 2
 ; NORMV-NEXT:    blt $a1, $a2, .LBB14_4
 ; NORMV-NEXT:  # %bb.1: # %.preheader
-; NORMV-NEXT:    ori $a3, $zero, 1
-; NORMV-NEXT:    addi.d $a2, $zero, -1
-; NORMV-NEXT:    ori $a4, $zero, 1000
+; NORMV-NEXT:    ori $a2, $zero, 1
+; NORMV-NEXT:    ori $a3, $zero, 1000
 ; NORMV-NEXT:    .p2align 4, , 16
 ; NORMV-NEXT:  .LBB14_2: # =>This Inner Loop Header: Depth=1
-; NORMV-NEXT:    addi.w $a5, $a0, 0
-; NORMV-NEXT:    blt $a4, $a5, .LBB14_5
+; NORMV-NEXT:    addi.w $a4, $a0, 0
+; NORMV-NEXT:    blt $a3, $a4, .LBB14_5
 ; NORMV-NEXT:  # %bb.3: # in Loop: Header=BB14_2 Depth=1
-; NORMV-NEXT:    add.d $a0, $a3, $a0
-; NORMV-NEXT:    addi.d $a3, $a3, 1
-; NORMV-NEXT:    addi.w $a3, $a3, 0
+; NORMV-NEXT:    add.d $a0, $a2, $a0
+; NORMV-NEXT:    addi.d $a2, $a2, 1
+; NORMV-NEXT:    addi.w $a2, $a2, 0
 ; NORMV-NEXT:    addi.d $a0, $a0, 0
-; NORMV-NEXT:    blt $a3, $a1, .LBB14_2
+; NORMV-NEXT:    blt $a2, $a1, .LBB14_2
 ; NORMV-NEXT:  .LBB14_4:
 ; NORMV-NEXT:    addi.w $a0, $a0, 0
 ; NORMV-NEXT:    ret
 ; NORMV-NEXT:  .LBB14_5:
-; NORMV-NEXT:    addi.w $a0, $a2, 0
+; NORMV-NEXT:    addi.d $a0, $zero, -1
+; NORMV-NEXT:    addi.w $a0, $a0, 0
 ; NORMV-NEXT:    ret
   %3 = icmp sgt i32 %1, 1
   br i1 %3, label %4, label %12
@@ -893,22 +893,22 @@ define signext i32 @test14c(i32 zeroext %0, i32 signext %1) {
 ; CHECK-NEXT:    ori $a2, $zero, 2
 ; CHECK-NEXT:    blt $a1, $a2, .LBB15_4
 ; CHECK-NEXT:  # %bb.1: # %.preheader
-; CHECK-NEXT:    ori $a3, $zero, 1
-; CHECK-NEXT:    addi.d $a2, $zero, -1
-; CHECK-NEXT:    ori $a4, $zero, 1000
+; CHECK-NEXT:    ori $a2, $zero, 1
+; CHECK-NEXT:    ori $a3, $zero, 1000
 ; CHECK-NEXT:    .p2align 4, , 16
 ; CHECK-NEXT:  .LBB15_2: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    addi.w $a5, $a0, 0
-; CHECK-NEXT:    blt $a4, $a5, .LBB15_5
+; CHECK-NEXT:    addi.w $a4, $a0, 0
+; CHECK-NEXT:    blt $a3, $a4, .LBB15_5
 ; CHECK-NEXT:  # %bb.3: # in Loop: Header=BB15_2 Depth=1
-; CHECK-NEXT:    add.d $a0, $a3, $a0
-; CHECK-NEXT:    addi.w $a3, $a3, 1
-; CHECK-NEXT:    blt $a3, $a1, .LBB15_2
+; CHECK-NEXT:    add.d $a0, $a2, $a0
+; CHECK-NEXT:    addi.w $a2, $a2, 1
+; CHECK-NEXT:    blt $a2, $a1, .LBB15_2
 ; CHECK-NEXT:  .LBB15_4:
 ; CHECK-NEXT:    addi.w $a0, $a0, 0
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB15_5:
-; CHECK-NEXT:    addi.w $a0, $a2, 0
+; CHECK-NEXT:    addi.d $a0, $zero, -1
+; CHECK-NEXT:    addi.w $a0, $a0, 0
 ; CHECK-NEXT:    ret
 ;
 ; NORMV-LABEL: test14c:
@@ -916,24 +916,24 @@ define signext i32 @test14c(i32 zeroext %0, i32 signext %1) {
 ; NORMV-NEXT:    ori $a2, $zero, 2
 ; NORMV-NEXT:    blt $a1, $a2, .LBB15_4
 ; NORMV-NEXT:  # %bb.1: # %.preheader
-; NORMV-NEXT:    ori $a3, $zero, 1
-; NORMV-NEXT:    addi.d $a2, $zero, -1
-; NORMV-NEXT:    ori $a4, $zero, 1000
+; NORMV-NEXT:    ori $a2, $zero, 1
+; NORMV-NEXT:    ori $a3, $zero, 1000
 ; NORMV-NEXT:    .p2align 4, , 16
 ; NORMV-NEXT:  .LBB15_2: # =>This Inner Loop Header: Depth=1
-; NORMV-NEXT:    addi.w $a5, $a0, 0
-; NORMV-NEXT:    blt $a4, $a5, .LBB15_5
+; NORMV-NEXT:    addi.w $a4, $a0, 0
+; NORMV-NEXT:    blt $a3, $a4, .LBB15_5
 ; NORMV-NEXT:  # %bb.3: # in Loop: Header=BB15_2 Depth=1
-; NORMV-NEXT:    add.d $a0, $a3, $a0
-; NORMV-NEXT:    addi.d $a3, $a3, 1
-; NORMV-NEXT:    addi.w $a3, $a3, 0
+; NORMV-NEXT:    add.d $a0, $a2, $a0
+; NORMV-NEXT:    addi.d $a2, $a2, 1
+; NORMV-NEXT:    addi.w $a2, $a2, 0
 ; NORMV-NEXT:    addi.d $a0, $a0, 0
-; NORMV-NEXT:    blt $a3, $a1, .LBB15_2
+; NORMV-NEXT:    blt $a2, $a1, .LBB15_2
 ; NORMV-NEXT:  .LBB15_4:
 ; NORMV-NEXT:    addi.w $a0, $a0, 0
 ; NORMV-NEXT:    ret
 ; NORMV-NEXT:  .LBB15_5:
-; NORMV-NEXT:    addi.w $a0, $a2, 0
+; NORMV-NEXT:    addi.d $a0, $zero, -1
+; NORMV-NEXT:    addi.w $a0, $a0, 0
 ; NORMV-NEXT:    ret
   %3 = icmp sgt i32 %1, 1
   br i1 %3, label %4, label %12

diff  --git a/llvm/test/CodeGen/LoongArch/vararg.ll b/llvm/test/CodeGen/LoongArch/vararg.ll
index 112459ab1f4d4..bbf3b8e333306 100644
--- a/llvm/test/CodeGen/LoongArch/vararg.ll
+++ b/llvm/test/CodeGen/LoongArch/vararg.ll
@@ -292,14 +292,14 @@ define void @va_aligned_stack_caller() nounwind {
 ; LA64-FPELIM-NEXT:    st.d $zero, $sp, 80
 ; LA64-FPELIM-NEXT:    st.d $zero, $sp, 72
 ; LA64-FPELIM-NEXT:    ori $a5, $zero, 1000
+; LA64-FPELIM-NEXT:    ori $a0, $zero, 1
 ; LA64-FPELIM-NEXT:    ori $a1, $zero, 11
 ; LA64-FPELIM-NEXT:    addi.d $a2, $sp, 64
 ; LA64-FPELIM-NEXT:    ori $a3, $zero, 12
 ; LA64-FPELIM-NEXT:    ori $a4, $zero, 13
-; LA64-FPELIM-NEXT:    ori $a0, $zero, 1
+; LA64-FPELIM-NEXT:    ori $a7, $zero, 1
 ; LA64-FPELIM-NEXT:    st.d $a5, $sp, 64
 ; LA64-FPELIM-NEXT:    move $a6, $zero
-; LA64-FPELIM-NEXT:    move $a7, $a0
 ; LA64-FPELIM-NEXT:    bl %plt(va_aligned_stack_callee)
 ; LA64-FPELIM-NEXT:    ld.d $ra, $sp, 104 # 8-byte Folded Reload
 ; LA64-FPELIM-NEXT:    addi.d $sp, $sp, 112
@@ -333,14 +333,14 @@ define void @va_aligned_stack_caller() nounwind {
 ; LA64-WITHFP-NEXT:    st.d $zero, $fp, -32
 ; LA64-WITHFP-NEXT:    st.d $zero, $fp, -40
 ; LA64-WITHFP-NEXT:    ori $a5, $zero, 1000
+; LA64-WITHFP-NEXT:    ori $a0, $zero, 1
 ; LA64-WITHFP-NEXT:    ori $a1, $zero, 11
 ; LA64-WITHFP-NEXT:    addi.d $a2, $fp, -48
 ; LA64-WITHFP-NEXT:    ori $a3, $zero, 12
 ; LA64-WITHFP-NEXT:    ori $a4, $zero, 13
-; LA64-WITHFP-NEXT:    ori $a0, $zero, 1
+; LA64-WITHFP-NEXT:    ori $a7, $zero, 1
 ; LA64-WITHFP-NEXT:    st.d $a5, $fp, -48
 ; LA64-WITHFP-NEXT:    move $a6, $zero
-; LA64-WITHFP-NEXT:    move $a7, $a0
 ; LA64-WITHFP-NEXT:    bl %plt(va_aligned_stack_callee)
 ; LA64-WITHFP-NEXT:    ld.d $fp, $sp, 96 # 8-byte Folded Reload
 ; LA64-WITHFP-NEXT:    ld.d $ra, $sp, 104 # 8-byte Folded Reload


        

