[llvm] [LoongArch] Set isAsCheapAsAMove on LU{12, 32, 52}I.D/ADDI.D and {X}ORI instructions (PR #94733)

via llvm-commits llvm-commits at lists.llvm.org
Fri Jun 7 00:40:58 PDT 2024


https://github.com/heiher created https://github.com/llvm/llvm-project/pull/94733

None

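Context for the change (illustrative, not part of the patch itself): marking these immediate-forming instructions (LU12I.W, LU32I.D, LU52I.D, ADDI.D, ORI, XORI) as isAsCheapAsAMove, in addition to isReMaterializable, tells the generic codegen passes that recomputing the constant is no more expensive than a register copy. As the updated tests show, constants that were previously hoisted into callee-saved registers across calls and loops are now rematerialized next to their uses, which shrinks stack frames and removes saved-register spills and reloads. A minimal sketch of how a pass consults these hints, assuming the usual TargetInstrInfo interface:

// Illustrative sketch only: how generic passes query the flags this patch
// sets; this is not code from the LoongArch backend change itself.
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

using namespace llvm;

// Prefer recomputing a value at its use over keeping it live in a
// register across a loop or call.
static bool preferRemat(const MachineInstr &MI, const TargetInstrInfo &TII) {
  // isTriviallyReMaterializable honours the isReMaterializable hint and
  // verifies it via isReallyTriviallyReMaterializable; isAsCheapAsAMove
  // reads the flag added here to LU12I.W/LU32I.D/LU52I.D/ADDI.D/ORI/XORI.
  return TII.isTriviallyReMaterializable(MI) && TII.isAsCheapAsAMove(MI);
}

Passes such as the register coalescer and MachineLICM apply a check roughly along these lines when deciding whether to rematerialize a definition or keep it live across a region of high register pressure.
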
From 0465b9ec8c62d26809613f5302f993ca5d297788 Mon Sep 17 00:00:00 2001
From: WANG Rui <wangrui at loongson.cn>
Date: Fri, 31 May 2024 15:40:49 +0800
Subject: [PATCH] [LoongArch] Set isAsCheapAsAMove on LU{12,32,52}I.D/ADDI.D
 and {X}ORI instructions

---
 .../Target/LoongArch/LoongArchInstrInfo.td    |   10 +-
 .../CodeGen/LoongArch/calling-conv-lp64d.ll   |    2 +-
 .../LoongArch/can-not-realign-stack.ll        |   23 +-
 .../LoongArch/ir-instruction/atomicrmw-fp.ll  | 1796 ++++++-----------
 llvm/test/CodeGen/LoongArch/jump-table.ll     |  106 +-
 .../register-coalescer-crash-pr79718.mir      |    2 +-
 llvm/test/CodeGen/LoongArch/sextw-removal.ll  |   76 +-
 llvm/test/CodeGen/LoongArch/vararg.ll         |    8 +-
 llvm/test/CodeGen/LoongArch/vector-fp-imm.ll  |  187 +-
 9 files changed, 833 insertions(+), 1377 deletions(-)

diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
index f72f46e39e2a4..f4bdd7d1209cf 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -734,7 +734,7 @@ def ADD_W : ALU_3R<0x00100000>;
 def SUB_W : ALU_3R<0x00110000>;
 def ADDI_W : ALU_2RI12<0x02800000, simm12_addlike>;
 def ALSL_W : ALU_3RI2<0x00040000, uimm2_plus1>;
-let isReMaterializable = 1 in {
+let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
 def LU12I_W : ALU_1RI20<0x14000000, simm20_lu12iw>;
 }
 def SLT  : ALU_3R<0x00120000>;
@@ -751,7 +751,7 @@ def XOR  : ALU_3R<0x00158000>;
 def ANDN : ALU_3R<0x00168000>;
 def ORN  : ALU_3R<0x00160000>;
 def ANDI : ALU_2RI12<0x03400000, uimm12>;
-let isReMaterializable = 1 in {
+let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
 def ORI  : ALU_2RI12<0x03800000, uimm12_ori>;
 def XORI : ALU_2RI12<0x03c00000, uimm12>;
 }
@@ -858,7 +858,7 @@ def ADD_D : ALU_3R<0x00108000>;
 def SUB_D : ALU_3R<0x00118000>;
 // ADDI_D isn't always rematerializable, but isReMaterializable will be used as
 // a hint which is verified in isReallyTriviallyReMaterializable.
-let isReMaterializable = 1 in {
+let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
 def ADDI_D : ALU_2RI12<0x02c00000, simm12_addlike>;
 }
 def ADDU16I_D : ALU_2RI16<0x10000000, simm16>;
@@ -866,12 +866,12 @@ def ALSL_WU : ALU_3RI2<0x00060000, uimm2_plus1>;
 def ALSL_D  : ALU_3RI2<0x002c0000, uimm2_plus1>;
 let Constraints = "$rd = $dst" in {
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
-    isReMaterializable = 1 in
+    isReMaterializable = 1, isAsCheapAsAMove = 1 in
 def LU32I_D : Fmt1RI20<0x16000000, (outs GPR:$dst),
                        (ins GPR:$rd, simm20_lu32id:$imm20),
                        "$rd, $imm20">;
 }
-let isReMaterializable = 1 in {
+let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
 def LU52I_D : ALU_2RI12<0x03000000, simm12_lu52id>;
 }
 def PCADDU18I : ALU_1RI20<0x1e000000, simm20_pcaddu18i>;
diff --git a/llvm/test/CodeGen/LoongArch/calling-conv-lp64d.ll b/llvm/test/CodeGen/LoongArch/calling-conv-lp64d.ll
index 5e89fd41d6a7c..cc6ba057019c6 100644
--- a/llvm/test/CodeGen/LoongArch/calling-conv-lp64d.ll
+++ b/llvm/test/CodeGen/LoongArch/calling-conv-lp64d.ll
@@ -28,9 +28,9 @@ define i64 @caller_float_in_fpr() nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi.d $sp, $sp, -16
 ; CHECK-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; CHECK-NEXT:    ori $a0, $zero, 1
 ; CHECK-NEXT:    movgr2fr.w $fa0, $zero
 ; CHECK-NEXT:    movgr2fr.d $fa1, $zero
+; CHECK-NEXT:    ori $a0, $zero, 1
 ; CHECK-NEXT:    bl %plt(callee_float_in_fpr)
 ; CHECK-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 16
diff --git a/llvm/test/CodeGen/LoongArch/can-not-realign-stack.ll b/llvm/test/CodeGen/LoongArch/can-not-realign-stack.ll
index a3d85f6bd06d8..49155a3966a84 100644
--- a/llvm/test/CodeGen/LoongArch/can-not-realign-stack.ll
+++ b/llvm/test/CodeGen/LoongArch/can-not-realign-stack.ll
@@ -16,7 +16,6 @@ define dso_local noundef signext i32 @main() nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addi.d $sp, $sp, -272
 ; CHECK-NEXT:    st.d $ra, $sp, 264 # 8-byte Folded Spill
-; CHECK-NEXT:    st.d $fp, $sp, 256 # 8-byte Folded Spill
 ; CHECK-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI0_0)
 ; CHECK-NEXT:    addi.d $a0, $a0, %pc_lo12(.LCPI0_0)
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
@@ -33,25 +32,23 @@ define dso_local noundef signext i32 @main() nounwind {
 ; CHECK-NEXT:    addi.d $a0, $a0, %pc_lo12(.LCPI0_3)
 ; CHECK-NEXT:    xvld $xr3, $a0, 0
 ; CHECK-NEXT:    xvst $xr3, $sp, 0 # 32-byte Folded Spill
-; CHECK-NEXT:    xvst $xr0, $sp, 128
-; CHECK-NEXT:    xvst $xr1, $sp, 160
-; CHECK-NEXT:    xvst $xr2, $sp, 192
-; CHECK-NEXT:    xvst $xr3, $sp, 224
-; CHECK-NEXT:    addi.d $fp, $sp, 128
-; CHECK-NEXT:    move $a0, $fp
+; CHECK-NEXT:    xvst $xr0, $sp, 136
+; CHECK-NEXT:    xvst $xr1, $sp, 168
+; CHECK-NEXT:    xvst $xr2, $sp, 200
+; CHECK-NEXT:    xvst $xr3, $sp, 232
+; CHECK-NEXT:    addi.d $a0, $sp, 136
 ; CHECK-NEXT:    bl %plt(foo)
 ; CHECK-NEXT:    xvld $xr0, $sp, 96 # 32-byte Folded Reload
-; CHECK-NEXT:    xvst $xr0, $sp, 128
+; CHECK-NEXT:    xvst $xr0, $sp, 136
 ; CHECK-NEXT:    xvld $xr0, $sp, 64 # 32-byte Folded Reload
-; CHECK-NEXT:    xvst $xr0, $sp, 160
+; CHECK-NEXT:    xvst $xr0, $sp, 168
 ; CHECK-NEXT:    xvld $xr0, $sp, 32 # 32-byte Folded Reload
-; CHECK-NEXT:    xvst $xr0, $sp, 192
+; CHECK-NEXT:    xvst $xr0, $sp, 200
 ; CHECK-NEXT:    xvld $xr0, $sp, 0 # 32-byte Folded Reload
-; CHECK-NEXT:    xvst $xr0, $sp, 224
-; CHECK-NEXT:    move $a0, $fp
+; CHECK-NEXT:    xvst $xr0, $sp, 232
+; CHECK-NEXT:    addi.d $a0, $sp, 136
 ; CHECK-NEXT:    bl %plt(bar)
 ; CHECK-NEXT:    move $a0, $zero
-; CHECK-NEXT:    ld.d $fp, $sp, 256 # 8-byte Folded Reload
 ; CHECK-NEXT:    ld.d $ra, $sp, 264 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 272
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll
index 81cc29419a0e0..ea110e8a90c13 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll
@@ -297,95 +297,68 @@ define float @float_fmax_acquire(ptr %p) nounwind {
 define double @double_fadd_acquire(ptr %p) nounwind {
 ; LA64F-LABEL: double_fadd_acquire:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s5, $a0, 0
-; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 8
-; LA64F-NEXT:    addi.d $s3, $sp, 0
-; LA64F-NEXT:    ori $s4, $zero, 2
+; LA64F-NEXT:    ld.d $s0, $a0, 0
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB4_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    lu52i.d $a1, $zero, 1023
+; LA64F-NEXT:    move $a0, $s0
 ; LA64F-NEXT:    bl %plt(__adddf3)
-; LA64F-NEXT:    st.d $s5, $sp, 8
-; LA64F-NEXT:    st.d $a0, $sp, 0
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    st.d $s0, $sp, 16
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 16
+; LA64F-NEXT:    addi.d $a3, $sp, 8
+; LA64F-NEXT:    ori $a4, $zero, 2
+; LA64F-NEXT:    ori $a5, $zero, 2
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
-; LA64F-NEXT:    move $a5, $s4
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s5, $sp, 8
+; LA64F-NEXT:    ld.d $s0, $sp, 16
 ; LA64F-NEXT:    beqz $a0, .LBB4_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    ld.d $s5, $sp, 16 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s0
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fadd_acquire:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
 ; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
 ; LA64D-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64D-NEXT:    ffint.d.l $fs0, $fa1
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 16
-; LA64D-NEXT:    addi.d $s2, $sp, 8
-; LA64D-NEXT:    ori $s3, $zero, 2
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB4_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fadd.d $fa1, $fa0, $fs0
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
 ; LA64D-NEXT:    fst.d $fa1, $sp, 8
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 2
+; LA64D-NEXT:    ori $a5, $zero, 2
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
-; LA64D-NEXT:    move $a5, $s3
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
 ; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB4_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fadd ptr %p, double 1.0 acquire, align 4
   ret double %v
@@ -394,95 +367,68 @@ define double @double_fadd_acquire(ptr %p) nounwind {
 define double @double_fsub_acquire(ptr %p) nounwind {
 ; LA64F-LABEL: double_fsub_acquire:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s5, $a0, 0
-; LA64F-NEXT:    lu52i.d $s0, $zero, -1025
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 8
-; LA64F-NEXT:    addi.d $s3, $sp, 0
-; LA64F-NEXT:    ori $s4, $zero, 2
+; LA64F-NEXT:    ld.d $s0, $a0, 0
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB5_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    lu52i.d $a1, $zero, -1025
+; LA64F-NEXT:    move $a0, $s0
 ; LA64F-NEXT:    bl %plt(__adddf3)
-; LA64F-NEXT:    st.d $s5, $sp, 8
-; LA64F-NEXT:    st.d $a0, $sp, 0
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    st.d $s0, $sp, 16
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 16
+; LA64F-NEXT:    addi.d $a3, $sp, 8
+; LA64F-NEXT:    ori $a4, $zero, 2
+; LA64F-NEXT:    ori $a5, $zero, 2
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
-; LA64F-NEXT:    move $a5, $s4
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s5, $sp, 8
+; LA64F-NEXT:    ld.d $s0, $sp, 16
 ; LA64F-NEXT:    beqz $a0, .LBB5_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    ld.d $s5, $sp, 16 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s0
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fsub_acquire:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
 ; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_0)
 ; LA64D-NEXT:    addi.d $a0, $a0, %pc_lo12(.LCPI5_0)
 ; LA64D-NEXT:    fld.d $fs0, $a0, 0
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 16
-; LA64D-NEXT:    addi.d $s2, $sp, 8
-; LA64D-NEXT:    ori $s3, $zero, 2
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB5_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fadd.d $fa1, $fa0, $fs0
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
 ; LA64D-NEXT:    fst.d $fa1, $sp, 8
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 2
+; LA64D-NEXT:    ori $a5, $zero, 2
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
-; LA64D-NEXT:    move $a5, $s3
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
 ; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB5_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fsub ptr %p, double 1.0 acquire, align 4
   ret double %v
@@ -491,71 +437,48 @@ define double @double_fsub_acquire(ptr %p) nounwind {
 define double @double_fmin_acquire(ptr %p) nounwind {
 ; LA64F-LABEL: double_fmin_acquire:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s5, $a0, 0
-; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 8
-; LA64F-NEXT:    addi.d $s3, $sp, 0
-; LA64F-NEXT:    ori $s4, $zero, 2
+; LA64F-NEXT:    ld.d $s0, $a0, 0
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB6_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    lu52i.d $a1, $zero, 1023
+; LA64F-NEXT:    move $a0, $s0
 ; LA64F-NEXT:    bl %plt(fmin)
-; LA64F-NEXT:    st.d $s5, $sp, 8
-; LA64F-NEXT:    st.d $a0, $sp, 0
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    st.d $s0, $sp, 16
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 16
+; LA64F-NEXT:    addi.d $a3, $sp, 8
+; LA64F-NEXT:    ori $a4, $zero, 2
+; LA64F-NEXT:    ori $a5, $zero, 2
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
-; LA64F-NEXT:    move $a5, $s4
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s5, $sp, 8
+; LA64F-NEXT:    ld.d $s0, $sp, 16
 ; LA64F-NEXT:    beqz $a0, .LBB6_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    ld.d $s5, $sp, 16 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s0
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fmin_acquire:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
 ; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
 ; LA64D-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64D-NEXT:    ffint.d.l $fs0, $fa1
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 16
-; LA64D-NEXT:    addi.d $s2, $sp, 8
-; LA64D-NEXT:    ori $s3, $zero, 2
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB6_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
@@ -563,24 +486,20 @@ define double @double_fmin_acquire(ptr %p) nounwind {
 ; LA64D-NEXT:    fmin.d $fa1, $fa1, $fs0
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
 ; LA64D-NEXT:    fst.d $fa1, $sp, 8
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 2
+; LA64D-NEXT:    ori $a5, $zero, 2
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
-; LA64D-NEXT:    move $a5, $s3
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
 ; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB6_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fmin ptr %p, double 1.0 acquire, align 4
   ret double %v
@@ -589,71 +508,48 @@ define double @double_fmin_acquire(ptr %p) nounwind {
 define double @double_fmax_acquire(ptr %p) nounwind {
 ; LA64F-LABEL: double_fmax_acquire:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s5, $a0, 0
-; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 8
-; LA64F-NEXT:    addi.d $s3, $sp, 0
-; LA64F-NEXT:    ori $s4, $zero, 2
+; LA64F-NEXT:    ld.d $s0, $a0, 0
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB7_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    lu52i.d $a1, $zero, 1023
+; LA64F-NEXT:    move $a0, $s0
 ; LA64F-NEXT:    bl %plt(fmax)
-; LA64F-NEXT:    st.d $s5, $sp, 8
-; LA64F-NEXT:    st.d $a0, $sp, 0
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    st.d $s0, $sp, 16
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 16
+; LA64F-NEXT:    addi.d $a3, $sp, 8
+; LA64F-NEXT:    ori $a4, $zero, 2
+; LA64F-NEXT:    ori $a5, $zero, 2
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
-; LA64F-NEXT:    move $a5, $s4
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s5, $sp, 8
+; LA64F-NEXT:    ld.d $s0, $sp, 16
 ; LA64F-NEXT:    beqz $a0, .LBB7_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    ld.d $s5, $sp, 16 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s0
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fmax_acquire:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
 ; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
 ; LA64D-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64D-NEXT:    ffint.d.l $fs0, $fa1
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 16
-; LA64D-NEXT:    addi.d $s2, $sp, 8
-; LA64D-NEXT:    ori $s3, $zero, 2
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB7_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
@@ -661,24 +557,20 @@ define double @double_fmax_acquire(ptr %p) nounwind {
 ; LA64D-NEXT:    fmax.d $fa1, $fa1, $fs0
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
 ; LA64D-NEXT:    fst.d $fa1, $sp, 8
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 2
+; LA64D-NEXT:    ori $a5, $zero, 2
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
-; LA64D-NEXT:    move $a5, $s3
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
 ; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB7_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fmax ptr %p, double 1.0 acquire, align 4
   ret double %v
@@ -979,95 +871,68 @@ define float @float_fmax_release(ptr %p) nounwind {
 define double @double_fadd_release(ptr %p) nounwind {
 ; LA64F-LABEL: double_fadd_release:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s5, $a0, 0
-; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 8
-; LA64F-NEXT:    addi.d $s3, $sp, 0
-; LA64F-NEXT:    ori $s4, $zero, 3
+; LA64F-NEXT:    ld.d $s0, $a0, 0
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB12_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    lu52i.d $a1, $zero, 1023
+; LA64F-NEXT:    move $a0, $s0
 ; LA64F-NEXT:    bl %plt(__adddf3)
-; LA64F-NEXT:    st.d $s5, $sp, 8
-; LA64F-NEXT:    st.d $a0, $sp, 0
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    st.d $s0, $sp, 16
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 16
+; LA64F-NEXT:    addi.d $a3, $sp, 8
+; LA64F-NEXT:    ori $a4, $zero, 3
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
 ; LA64F-NEXT:    move $a5, $zero
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s5, $sp, 8
+; LA64F-NEXT:    ld.d $s0, $sp, 16
 ; LA64F-NEXT:    beqz $a0, .LBB12_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    ld.d $s5, $sp, 16 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s0
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fadd_release:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
 ; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
 ; LA64D-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64D-NEXT:    ffint.d.l $fs0, $fa1
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 16
-; LA64D-NEXT:    addi.d $s2, $sp, 8
-; LA64D-NEXT:    ori $s3, $zero, 3
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB12_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fadd.d $fa1, $fa0, $fs0
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
 ; LA64D-NEXT:    fst.d $fa1, $sp, 8
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 3
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
 ; LA64D-NEXT:    move $a5, $zero
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
 ; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB12_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fadd ptr %p, double 1.0 release, align 4
   ret double %v
@@ -1076,95 +941,68 @@ define double @double_fadd_release(ptr %p) nounwind {
 define double @double_fsub_release(ptr %p) nounwind {
 ; LA64F-LABEL: double_fsub_release:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s5, $a0, 0
-; LA64F-NEXT:    lu52i.d $s0, $zero, -1025
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 8
-; LA64F-NEXT:    addi.d $s3, $sp, 0
-; LA64F-NEXT:    ori $s4, $zero, 3
+; LA64F-NEXT:    ld.d $s0, $a0, 0
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB13_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    lu52i.d $a1, $zero, -1025
+; LA64F-NEXT:    move $a0, $s0
 ; LA64F-NEXT:    bl %plt(__adddf3)
-; LA64F-NEXT:    st.d $s5, $sp, 8
-; LA64F-NEXT:    st.d $a0, $sp, 0
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    st.d $s0, $sp, 16
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 16
+; LA64F-NEXT:    addi.d $a3, $sp, 8
+; LA64F-NEXT:    ori $a4, $zero, 3
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
 ; LA64F-NEXT:    move $a5, $zero
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s5, $sp, 8
+; LA64F-NEXT:    ld.d $s0, $sp, 16
 ; LA64F-NEXT:    beqz $a0, .LBB13_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    ld.d $s5, $sp, 16 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s0
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fsub_release:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
 ; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI13_0)
 ; LA64D-NEXT:    addi.d $a0, $a0, %pc_lo12(.LCPI13_0)
 ; LA64D-NEXT:    fld.d $fs0, $a0, 0
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 16
-; LA64D-NEXT:    addi.d $s2, $sp, 8
-; LA64D-NEXT:    ori $s3, $zero, 3
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB13_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fadd.d $fa1, $fa0, $fs0
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
 ; LA64D-NEXT:    fst.d $fa1, $sp, 8
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 3
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
 ; LA64D-NEXT:    move $a5, $zero
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
 ; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB13_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fsub ptr %p, double 1.0 release, align 4
   ret double %v
@@ -1173,71 +1011,48 @@ define double @double_fsub_release(ptr %p) nounwind {
 define double @double_fmin_release(ptr %p) nounwind {
 ; LA64F-LABEL: double_fmin_release:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s5, $a0, 0
-; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 8
-; LA64F-NEXT:    addi.d $s3, $sp, 0
-; LA64F-NEXT:    ori $s4, $zero, 3
+; LA64F-NEXT:    ld.d $s0, $a0, 0
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB14_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    lu52i.d $a1, $zero, 1023
+; LA64F-NEXT:    move $a0, $s0
 ; LA64F-NEXT:    bl %plt(fmin)
-; LA64F-NEXT:    st.d $s5, $sp, 8
-; LA64F-NEXT:    st.d $a0, $sp, 0
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    st.d $s0, $sp, 16
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 16
+; LA64F-NEXT:    addi.d $a3, $sp, 8
+; LA64F-NEXT:    ori $a4, $zero, 3
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
 ; LA64F-NEXT:    move $a5, $zero
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s5, $sp, 8
+; LA64F-NEXT:    ld.d $s0, $sp, 16
 ; LA64F-NEXT:    beqz $a0, .LBB14_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    ld.d $s5, $sp, 16 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s0
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fmin_release:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
 ; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
 ; LA64D-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64D-NEXT:    ffint.d.l $fs0, $fa1
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 16
-; LA64D-NEXT:    addi.d $s2, $sp, 8
-; LA64D-NEXT:    ori $s3, $zero, 3
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB14_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
@@ -1245,24 +1060,20 @@ define double @double_fmin_release(ptr %p) nounwind {
 ; LA64D-NEXT:    fmin.d $fa1, $fa1, $fs0
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
 ; LA64D-NEXT:    fst.d $fa1, $sp, 8
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 3
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
 ; LA64D-NEXT:    move $a5, $zero
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
 ; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB14_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fmin ptr %p, double 1.0 release, align 4
   ret double %v
@@ -1271,71 +1082,48 @@ define double @double_fmin_release(ptr %p) nounwind {
 define double @double_fmax_release(ptr %p) nounwind {
 ; LA64F-LABEL: double_fmax_release:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s5, $a0, 0
-; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 8
-; LA64F-NEXT:    addi.d $s3, $sp, 0
-; LA64F-NEXT:    ori $s4, $zero, 3
+; LA64F-NEXT:    ld.d $s0, $a0, 0
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB15_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    lu52i.d $a1, $zero, 1023
+; LA64F-NEXT:    move $a0, $s0
 ; LA64F-NEXT:    bl %plt(fmax)
-; LA64F-NEXT:    st.d $s5, $sp, 8
-; LA64F-NEXT:    st.d $a0, $sp, 0
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    st.d $s0, $sp, 16
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 16
+; LA64F-NEXT:    addi.d $a3, $sp, 8
+; LA64F-NEXT:    ori $a4, $zero, 3
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
 ; LA64F-NEXT:    move $a5, $zero
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s5, $sp, 8
+; LA64F-NEXT:    ld.d $s0, $sp, 16
 ; LA64F-NEXT:    beqz $a0, .LBB15_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    ld.d $s5, $sp, 16 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s0
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fmax_release:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
 ; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
 ; LA64D-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64D-NEXT:    ffint.d.l $fs0, $fa1
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 16
-; LA64D-NEXT:    addi.d $s2, $sp, 8
-; LA64D-NEXT:    ori $s3, $zero, 3
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB15_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
@@ -1343,24 +1131,20 @@ define double @double_fmax_release(ptr %p) nounwind {
 ; LA64D-NEXT:    fmax.d $fa1, $fa1, $fs0
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
 ; LA64D-NEXT:    fst.d $fa1, $sp, 8
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 3
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
 ; LA64D-NEXT:    move $a5, $zero
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
 ; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB15_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fmax ptr %p, double 1.0 release, align 4
   ret double %v
@@ -1661,101 +1445,68 @@ define float @float_fmax_acq_rel(ptr %p) nounwind {
 define double @double_fadd_acq_rel(ptr %p) nounwind {
 ; LA64F-LABEL: double_fadd_acq_rel:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -96
-; LA64F-NEXT:    st.d $ra, $sp, 88 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 80 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s6, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s6, $a0, 0
-; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 16
-; LA64F-NEXT:    addi.d $s3, $sp, 8
-; LA64F-NEXT:    ori $s4, $zero, 4
-; LA64F-NEXT:    ori $s5, $zero, 2
+; LA64F-NEXT:    ld.d $s0, $a0, 0
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB20_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s6
-; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    lu52i.d $a1, $zero, 1023
+; LA64F-NEXT:    move $a0, $s0
 ; LA64F-NEXT:    bl %plt(__adddf3)
-; LA64F-NEXT:    st.d $s6, $sp, 16
+; LA64F-NEXT:    st.d $s0, $sp, 16
 ; LA64F-NEXT:    st.d $a0, $sp, 8
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 16
+; LA64F-NEXT:    addi.d $a3, $sp, 8
+; LA64F-NEXT:    ori $a4, $zero, 4
+; LA64F-NEXT:    ori $a5, $zero, 2
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
-; LA64F-NEXT:    move $a5, $s5
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s6, $sp, 16
+; LA64F-NEXT:    ld.d $s0, $sp, 16
 ; LA64F-NEXT:    beqz $a0, .LBB20_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s6
-; LA64F-NEXT:    ld.d $s6, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s5, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 80 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 88 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 96
+; LA64F-NEXT:    move $a0, $s0
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fadd_acq_rel:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64D-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
 ; LA64D-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64D-NEXT:    ffint.d.l $fs0, $fa1
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 8
-; LA64D-NEXT:    addi.d $s2, $sp, 0
-; LA64D-NEXT:    ori $s3, $zero, 4
-; LA64D-NEXT:    ori $s4, $zero, 2
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB20_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fadd.d $fa1, $fa0, $fs0
-; LA64D-NEXT:    fst.d $fa0, $sp, 8
-; LA64D-NEXT:    fst.d $fa1, $sp, 0
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    fst.d $fa0, $sp, 16
+; LA64D-NEXT:    fst.d $fa1, $sp, 8
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 4
+; LA64D-NEXT:    ori $a5, $zero, 2
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
-; LA64D-NEXT:    move $a5, $s4
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64D-NEXT:    fld.d $fa0, $sp, 8
+; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB20_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64D-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fadd ptr %p, double 1.0 acq_rel, align 4
   ret double %v
@@ -1764,101 +1515,68 @@ define double @double_fadd_acq_rel(ptr %p) nounwind {
 define double @double_fsub_acq_rel(ptr %p) nounwind {
 ; LA64F-LABEL: double_fsub_acq_rel:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -96
-; LA64F-NEXT:    st.d $ra, $sp, 88 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 80 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s6, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s6, $a0, 0
-; LA64F-NEXT:    lu52i.d $s0, $zero, -1025
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 16
-; LA64F-NEXT:    addi.d $s3, $sp, 8
-; LA64F-NEXT:    ori $s4, $zero, 4
-; LA64F-NEXT:    ori $s5, $zero, 2
+; LA64F-NEXT:    ld.d $s0, $a0, 0
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB21_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s6
-; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    lu52i.d $a1, $zero, -1025
+; LA64F-NEXT:    move $a0, $s0
 ; LA64F-NEXT:    bl %plt(__adddf3)
-; LA64F-NEXT:    st.d $s6, $sp, 16
+; LA64F-NEXT:    st.d $s0, $sp, 16
 ; LA64F-NEXT:    st.d $a0, $sp, 8
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 16
+; LA64F-NEXT:    addi.d $a3, $sp, 8
+; LA64F-NEXT:    ori $a4, $zero, 4
+; LA64F-NEXT:    ori $a5, $zero, 2
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
-; LA64F-NEXT:    move $a5, $s5
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s6, $sp, 16
+; LA64F-NEXT:    ld.d $s0, $sp, 16
 ; LA64F-NEXT:    beqz $a0, .LBB21_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s6
-; LA64F-NEXT:    ld.d $s6, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s5, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 80 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 88 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 96
+; LA64F-NEXT:    move $a0, $s0
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fsub_acq_rel:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64D-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI21_0)
 ; LA64D-NEXT:    addi.d $a0, $a0, %pc_lo12(.LCPI21_0)
 ; LA64D-NEXT:    fld.d $fs0, $a0, 0
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 8
-; LA64D-NEXT:    addi.d $s2, $sp, 0
-; LA64D-NEXT:    ori $s3, $zero, 4
-; LA64D-NEXT:    ori $s4, $zero, 2
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB21_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fadd.d $fa1, $fa0, $fs0
-; LA64D-NEXT:    fst.d $fa0, $sp, 8
-; LA64D-NEXT:    fst.d $fa1, $sp, 0
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    fst.d $fa0, $sp, 16
+; LA64D-NEXT:    fst.d $fa1, $sp, 8
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 4
+; LA64D-NEXT:    ori $a5, $zero, 2
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
-; LA64D-NEXT:    move $a5, $s4
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64D-NEXT:    fld.d $fa0, $sp, 8
+; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB21_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64D-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fsub ptr %p, double 1.0 acq_rel, align 4
   ret double %v
@@ -1867,102 +1585,69 @@ define double @double_fsub_acq_rel(ptr %p) nounwind {
 define double @double_fmin_acq_rel(ptr %p) nounwind {
 ; LA64F-LABEL: double_fmin_acq_rel:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -96
-; LA64F-NEXT:    st.d $ra, $sp, 88 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 80 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s6, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s6, $a0, 0
-; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 16
-; LA64F-NEXT:    addi.d $s3, $sp, 8
-; LA64F-NEXT:    ori $s4, $zero, 4
-; LA64F-NEXT:    ori $s5, $zero, 2
+; LA64F-NEXT:    ld.d $s0, $a0, 0
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB22_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s6
-; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    lu52i.d $a1, $zero, 1023
+; LA64F-NEXT:    move $a0, $s0
 ; LA64F-NEXT:    bl %plt(fmin)
-; LA64F-NEXT:    st.d $s6, $sp, 16
+; LA64F-NEXT:    st.d $s0, $sp, 16
 ; LA64F-NEXT:    st.d $a0, $sp, 8
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 16
+; LA64F-NEXT:    addi.d $a3, $sp, 8
+; LA64F-NEXT:    ori $a4, $zero, 4
+; LA64F-NEXT:    ori $a5, $zero, 2
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
-; LA64F-NEXT:    move $a5, $s5
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s6, $sp, 16
+; LA64F-NEXT:    ld.d $s0, $sp, 16
 ; LA64F-NEXT:    beqz $a0, .LBB22_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s6
-; LA64F-NEXT:    ld.d $s6, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s5, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 80 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 88 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 96
+; LA64F-NEXT:    move $a0, $s0
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fmin_acq_rel:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64D-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
 ; LA64D-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64D-NEXT:    ffint.d.l $fs0, $fa1
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 8
-; LA64D-NEXT:    addi.d $s2, $sp, 0
-; LA64D-NEXT:    ori $s3, $zero, 4
-; LA64D-NEXT:    ori $s4, $zero, 2
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB22_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fmax.d $fa1, $fa0, $fa0
 ; LA64D-NEXT:    fmin.d $fa1, $fa1, $fs0
-; LA64D-NEXT:    fst.d $fa0, $sp, 8
-; LA64D-NEXT:    fst.d $fa1, $sp, 0
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    fst.d $fa0, $sp, 16
+; LA64D-NEXT:    fst.d $fa1, $sp, 8
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 4
+; LA64D-NEXT:    ori $a5, $zero, 2
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
-; LA64D-NEXT:    move $a5, $s4
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64D-NEXT:    fld.d $fa0, $sp, 8
+; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB22_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64D-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fmin ptr %p, double 1.0 acq_rel, align 4
   ret double %v
@@ -1971,102 +1656,69 @@ define double @double_fmin_acq_rel(ptr %p) nounwind {
 define double @double_fmax_acq_rel(ptr %p) nounwind {
 ; LA64F-LABEL: double_fmax_acq_rel:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -96
-; LA64F-NEXT:    st.d $ra, $sp, 88 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 80 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s6, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s6, $a0, 0
-; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 16
-; LA64F-NEXT:    addi.d $s3, $sp, 8
-; LA64F-NEXT:    ori $s4, $zero, 4
-; LA64F-NEXT:    ori $s5, $zero, 2
+; LA64F-NEXT:    ld.d $s0, $a0, 0
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB23_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s6
-; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    lu52i.d $a1, $zero, 1023
+; LA64F-NEXT:    move $a0, $s0
 ; LA64F-NEXT:    bl %plt(fmax)
-; LA64F-NEXT:    st.d $s6, $sp, 16
+; LA64F-NEXT:    st.d $s0, $sp, 16
 ; LA64F-NEXT:    st.d $a0, $sp, 8
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 16
+; LA64F-NEXT:    addi.d $a3, $sp, 8
+; LA64F-NEXT:    ori $a4, $zero, 4
+; LA64F-NEXT:    ori $a5, $zero, 2
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
-; LA64F-NEXT:    move $a5, $s5
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s6, $sp, 16
+; LA64F-NEXT:    ld.d $s0, $sp, 16
 ; LA64F-NEXT:    beqz $a0, .LBB23_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s6
-; LA64F-NEXT:    ld.d $s6, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s5, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 80 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 88 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 96
+; LA64F-NEXT:    move $a0, $s0
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fmax_acq_rel:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64D-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
 ; LA64D-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64D-NEXT:    ffint.d.l $fs0, $fa1
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 8
-; LA64D-NEXT:    addi.d $s2, $sp, 0
-; LA64D-NEXT:    ori $s3, $zero, 4
-; LA64D-NEXT:    ori $s4, $zero, 2
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB23_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fmax.d $fa1, $fa0, $fa0
 ; LA64D-NEXT:    fmax.d $fa1, $fa1, $fs0
-; LA64D-NEXT:    fst.d $fa0, $sp, 8
-; LA64D-NEXT:    fst.d $fa1, $sp, 0
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    fst.d $fa0, $sp, 16
+; LA64D-NEXT:    fst.d $fa1, $sp, 8
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 4
+; LA64D-NEXT:    ori $a5, $zero, 2
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
-; LA64D-NEXT:    move $a5, $s4
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64D-NEXT:    fld.d $fa0, $sp, 8
+; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB23_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64D-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fmax ptr %p, double 1.0 acq_rel, align 4
   ret double %v
@@ -2367,95 +2019,68 @@ define float @float_fmax_seq_cst(ptr %p) nounwind {
 define double @double_fadd_seq_cst(ptr %p) nounwind {
 ; LA64F-LABEL: double_fadd_seq_cst:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s5, $a0, 0
-; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 8
-; LA64F-NEXT:    addi.d $s3, $sp, 0
-; LA64F-NEXT:    ori $s4, $zero, 5
+; LA64F-NEXT:    ld.d $s0, $a0, 0
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB28_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    lu52i.d $a1, $zero, 1023
+; LA64F-NEXT:    move $a0, $s0
 ; LA64F-NEXT:    bl %plt(__adddf3)
-; LA64F-NEXT:    st.d $s5, $sp, 8
-; LA64F-NEXT:    st.d $a0, $sp, 0
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    st.d $s0, $sp, 16
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 16
+; LA64F-NEXT:    addi.d $a3, $sp, 8
+; LA64F-NEXT:    ori $a4, $zero, 5
+; LA64F-NEXT:    ori $a5, $zero, 5
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
-; LA64F-NEXT:    move $a5, $s4
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s5, $sp, 8
+; LA64F-NEXT:    ld.d $s0, $sp, 16
 ; LA64F-NEXT:    beqz $a0, .LBB28_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    ld.d $s5, $sp, 16 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s0
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fadd_seq_cst:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
 ; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
 ; LA64D-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64D-NEXT:    ffint.d.l $fs0, $fa1
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 16
-; LA64D-NEXT:    addi.d $s2, $sp, 8
-; LA64D-NEXT:    ori $s3, $zero, 5
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB28_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fadd.d $fa1, $fa0, $fs0
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
 ; LA64D-NEXT:    fst.d $fa1, $sp, 8
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 5
+; LA64D-NEXT:    ori $a5, $zero, 5
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
-; LA64D-NEXT:    move $a5, $s3
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
 ; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB28_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fadd ptr %p, double 1.0 seq_cst, align 4
   ret double %v
@@ -2464,95 +2089,68 @@ define double @double_fadd_seq_cst(ptr %p) nounwind {
 define double @double_fsub_seq_cst(ptr %p) nounwind {
 ; LA64F-LABEL: double_fsub_seq_cst:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s5, $a0, 0
-; LA64F-NEXT:    lu52i.d $s0, $zero, -1025
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 8
-; LA64F-NEXT:    addi.d $s3, $sp, 0
-; LA64F-NEXT:    ori $s4, $zero, 5
+; LA64F-NEXT:    ld.d $s0, $a0, 0
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB29_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    lu52i.d $a1, $zero, -1025
+; LA64F-NEXT:    move $a0, $s0
 ; LA64F-NEXT:    bl %plt(__adddf3)
-; LA64F-NEXT:    st.d $s5, $sp, 8
-; LA64F-NEXT:    st.d $a0, $sp, 0
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    st.d $s0, $sp, 16
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 16
+; LA64F-NEXT:    addi.d $a3, $sp, 8
+; LA64F-NEXT:    ori $a4, $zero, 5
+; LA64F-NEXT:    ori $a5, $zero, 5
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
-; LA64F-NEXT:    move $a5, $s4
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s5, $sp, 8
+; LA64F-NEXT:    ld.d $s0, $sp, 16
 ; LA64F-NEXT:    beqz $a0, .LBB29_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    ld.d $s5, $sp, 16 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s0
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fsub_seq_cst:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
 ; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI29_0)
 ; LA64D-NEXT:    addi.d $a0, $a0, %pc_lo12(.LCPI29_0)
 ; LA64D-NEXT:    fld.d $fs0, $a0, 0
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 16
-; LA64D-NEXT:    addi.d $s2, $sp, 8
-; LA64D-NEXT:    ori $s3, $zero, 5
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB29_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fadd.d $fa1, $fa0, $fs0
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
 ; LA64D-NEXT:    fst.d $fa1, $sp, 8
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 5
+; LA64D-NEXT:    ori $a5, $zero, 5
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
-; LA64D-NEXT:    move $a5, $s3
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
 ; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB29_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fsub ptr %p, double 1.0 seq_cst, align 4
   ret double %v
@@ -2561,71 +2159,48 @@ define double @double_fsub_seq_cst(ptr %p) nounwind {
 define double @double_fmin_seq_cst(ptr %p) nounwind {
 ; LA64F-LABEL: double_fmin_seq_cst:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s5, $a0, 0
-; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 8
-; LA64F-NEXT:    addi.d $s3, $sp, 0
-; LA64F-NEXT:    ori $s4, $zero, 5
+; LA64F-NEXT:    ld.d $s0, $a0, 0
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB30_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    lu52i.d $a1, $zero, 1023
+; LA64F-NEXT:    move $a0, $s0
 ; LA64F-NEXT:    bl %plt(fmin)
-; LA64F-NEXT:    st.d $s5, $sp, 8
-; LA64F-NEXT:    st.d $a0, $sp, 0
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    st.d $s0, $sp, 16
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 16
+; LA64F-NEXT:    addi.d $a3, $sp, 8
+; LA64F-NEXT:    ori $a4, $zero, 5
+; LA64F-NEXT:    ori $a5, $zero, 5
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
-; LA64F-NEXT:    move $a5, $s4
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s5, $sp, 8
+; LA64F-NEXT:    ld.d $s0, $sp, 16
 ; LA64F-NEXT:    beqz $a0, .LBB30_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    ld.d $s5, $sp, 16 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s0
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fmin_seq_cst:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
 ; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
 ; LA64D-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64D-NEXT:    ffint.d.l $fs0, $fa1
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 16
-; LA64D-NEXT:    addi.d $s2, $sp, 8
-; LA64D-NEXT:    ori $s3, $zero, 5
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB30_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
@@ -2633,24 +2208,20 @@ define double @double_fmin_seq_cst(ptr %p) nounwind {
 ; LA64D-NEXT:    fmin.d $fa1, $fa1, $fs0
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
 ; LA64D-NEXT:    fst.d $fa1, $sp, 8
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 5
+; LA64D-NEXT:    ori $a5, $zero, 5
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
-; LA64D-NEXT:    move $a5, $s3
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
 ; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB30_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fmin ptr %p, double 1.0 seq_cst, align 4
   ret double %v
@@ -2659,71 +2230,48 @@ define double @double_fmin_seq_cst(ptr %p) nounwind {
 define double @double_fmax_seq_cst(ptr %p) nounwind {
 ; LA64F-LABEL: double_fmax_seq_cst:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s5, $a0, 0
-; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 8
-; LA64F-NEXT:    addi.d $s3, $sp, 0
-; LA64F-NEXT:    ori $s4, $zero, 5
+; LA64F-NEXT:    ld.d $s0, $a0, 0
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB31_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    lu52i.d $a1, $zero, 1023
+; LA64F-NEXT:    move $a0, $s0
 ; LA64F-NEXT:    bl %plt(fmax)
-; LA64F-NEXT:    st.d $s5, $sp, 8
-; LA64F-NEXT:    st.d $a0, $sp, 0
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    st.d $s0, $sp, 16
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 16
+; LA64F-NEXT:    addi.d $a3, $sp, 8
+; LA64F-NEXT:    ori $a4, $zero, 5
+; LA64F-NEXT:    ori $a5, $zero, 5
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
-; LA64F-NEXT:    move $a4, $s4
-; LA64F-NEXT:    move $a5, $s4
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s5, $sp, 8
+; LA64F-NEXT:    ld.d $s0, $sp, 16
 ; LA64F-NEXT:    beqz $a0, .LBB31_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    ld.d $s5, $sp, 16 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s0
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fmax_seq_cst:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -80
-; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
 ; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
 ; LA64D-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64D-NEXT:    ffint.d.l $fs0, $fa1
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 16
-; LA64D-NEXT:    addi.d $s2, $sp, 8
-; LA64D-NEXT:    ori $s3, $zero, 5
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB31_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
@@ -2731,24 +2279,20 @@ define double @double_fmax_seq_cst(ptr %p) nounwind {
 ; LA64D-NEXT:    fmax.d $fa1, $fa1, $fs0
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
 ; LA64D-NEXT:    fst.d $fa1, $sp, 8
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
+; LA64D-NEXT:    ori $a4, $zero, 5
+; LA64D-NEXT:    ori $a5, $zero, 5
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
-; LA64D-NEXT:    move $a4, $s3
-; LA64D-NEXT:    move $a5, $s3
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
 ; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB31_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fmax ptr %p, double 1.0 seq_cst, align 4
   ret double %v
@@ -3049,89 +2593,68 @@ define float @float_fmax_monotonic(ptr %p) nounwind {
 define double @double_fadd_monotonic(ptr %p) nounwind {
 ; LA64F-LABEL: double_fadd_monotonic:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s4, $a0, 0
-; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 16
-; LA64F-NEXT:    addi.d $s3, $sp, 8
+; LA64F-NEXT:    ld.d $s0, $a0, 0
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB36_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s4
-; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    lu52i.d $a1, $zero, 1023
+; LA64F-NEXT:    move $a0, $s0
 ; LA64F-NEXT:    bl %plt(__adddf3)
-; LA64F-NEXT:    st.d $s4, $sp, 16
+; LA64F-NEXT:    st.d $s0, $sp, 16
 ; LA64F-NEXT:    st.d $a0, $sp, 8
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 16
+; LA64F-NEXT:    addi.d $a3, $sp, 8
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
 ; LA64F-NEXT:    move $a4, $zero
 ; LA64F-NEXT:    move $a5, $zero
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s4, $sp, 16
+; LA64F-NEXT:    ld.d $s0, $sp, 16
 ; LA64F-NEXT:    beqz $a0, .LBB36_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s4
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s0
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fadd_monotonic:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -64
-; LA64D-NEXT:    st.d $ra, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 32 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 24 # 8-byte Folded Spill
-; LA64D-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
 ; LA64D-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64D-NEXT:    ffint.d.l $fs0, $fa1
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 8
-; LA64D-NEXT:    addi.d $s2, $sp, 0
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB36_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fadd.d $fa1, $fa0, $fs0
-; LA64D-NEXT:    fst.d $fa0, $sp, 8
-; LA64D-NEXT:    fst.d $fa1, $sp, 0
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    fst.d $fa0, $sp, 16
+; LA64D-NEXT:    fst.d $fa1, $sp, 8
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
 ; LA64D-NEXT:    move $a4, $zero
 ; LA64D-NEXT:    move $a5, $zero
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64D-NEXT:    fld.d $fa0, $sp, 8
+; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB36_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64D-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 64
+; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fadd ptr %p, double 1.0 monotonic, align 4
   ret double %v
@@ -3140,89 +2663,68 @@ define double @double_fadd_monotonic(ptr %p) nounwind {
 define double @double_fsub_monotonic(ptr %p) nounwind {
 ; LA64F-LABEL: double_fsub_monotonic:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s4, $a0, 0
-; LA64F-NEXT:    lu52i.d $s0, $zero, -1025
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 16
-; LA64F-NEXT:    addi.d $s3, $sp, 8
+; LA64F-NEXT:    ld.d $s0, $a0, 0
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB37_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s4
-; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    lu52i.d $a1, $zero, -1025
+; LA64F-NEXT:    move $a0, $s0
 ; LA64F-NEXT:    bl %plt(__adddf3)
-; LA64F-NEXT:    st.d $s4, $sp, 16
+; LA64F-NEXT:    st.d $s0, $sp, 16
 ; LA64F-NEXT:    st.d $a0, $sp, 8
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 16
+; LA64F-NEXT:    addi.d $a3, $sp, 8
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
 ; LA64F-NEXT:    move $a4, $zero
 ; LA64F-NEXT:    move $a5, $zero
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s4, $sp, 16
+; LA64F-NEXT:    ld.d $s0, $sp, 16
 ; LA64F-NEXT:    beqz $a0, .LBB37_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s4
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s0
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fsub_monotonic:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -64
-; LA64D-NEXT:    st.d $ra, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 32 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 24 # 8-byte Folded Spill
-; LA64D-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI37_0)
 ; LA64D-NEXT:    addi.d $a0, $a0, %pc_lo12(.LCPI37_0)
 ; LA64D-NEXT:    fld.d $fs0, $a0, 0
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 8
-; LA64D-NEXT:    addi.d $s2, $sp, 0
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB37_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fadd.d $fa1, $fa0, $fs0
-; LA64D-NEXT:    fst.d $fa0, $sp, 8
-; LA64D-NEXT:    fst.d $fa1, $sp, 0
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    fst.d $fa0, $sp, 16
+; LA64D-NEXT:    fst.d $fa1, $sp, 8
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
 ; LA64D-NEXT:    move $a4, $zero
 ; LA64D-NEXT:    move $a5, $zero
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64D-NEXT:    fld.d $fa0, $sp, 8
+; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB37_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64D-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 64
+; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fsub ptr %p, double 1.0 monotonic, align 4
   ret double %v
@@ -3231,90 +2733,69 @@ define double @double_fsub_monotonic(ptr %p) nounwind {
 define double @double_fmin_monotonic(ptr %p) nounwind {
 ; LA64F-LABEL: double_fmin_monotonic:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s4, $a0, 0
-; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 16
-; LA64F-NEXT:    addi.d $s3, $sp, 8
+; LA64F-NEXT:    ld.d $s0, $a0, 0
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB38_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s4
-; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    lu52i.d $a1, $zero, 1023
+; LA64F-NEXT:    move $a0, $s0
 ; LA64F-NEXT:    bl %plt(fmin)
-; LA64F-NEXT:    st.d $s4, $sp, 16
+; LA64F-NEXT:    st.d $s0, $sp, 16
 ; LA64F-NEXT:    st.d $a0, $sp, 8
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 16
+; LA64F-NEXT:    addi.d $a3, $sp, 8
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
 ; LA64F-NEXT:    move $a4, $zero
 ; LA64F-NEXT:    move $a5, $zero
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s4, $sp, 16
+; LA64F-NEXT:    ld.d $s0, $sp, 16
 ; LA64F-NEXT:    beqz $a0, .LBB38_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s4
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s0
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fmin_monotonic:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -64
-; LA64D-NEXT:    st.d $ra, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 32 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 24 # 8-byte Folded Spill
-; LA64D-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
 ; LA64D-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64D-NEXT:    ffint.d.l $fs0, $fa1
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 8
-; LA64D-NEXT:    addi.d $s2, $sp, 0
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB38_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fmax.d $fa1, $fa0, $fa0
 ; LA64D-NEXT:    fmin.d $fa1, $fa1, $fs0
-; LA64D-NEXT:    fst.d $fa0, $sp, 8
-; LA64D-NEXT:    fst.d $fa1, $sp, 0
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    fst.d $fa0, $sp, 16
+; LA64D-NEXT:    fst.d $fa1, $sp, 8
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
 ; LA64D-NEXT:    move $a4, $zero
 ; LA64D-NEXT:    move $a5, $zero
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64D-NEXT:    fld.d $fa0, $sp, 8
+; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB38_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64D-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 64
+; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fmin ptr %p, double 1.0 monotonic, align 4
   ret double %v
@@ -3323,90 +2804,69 @@ define double @double_fmin_monotonic(ptr %p) nounwind {
 define double @double_fmax_monotonic(ptr %p) nounwind {
 ; LA64F-LABEL: double_fmax_monotonic:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -80
-; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -48
+; LA64F-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
-; LA64F-NEXT:    ld.d $s4, $a0, 0
-; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
-; LA64F-NEXT:    ori $s1, $zero, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 16
-; LA64F-NEXT:    addi.d $s3, $sp, 8
+; LA64F-NEXT:    ld.d $s0, $a0, 0
 ; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB39_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    move $a0, $s4
-; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    lu52i.d $a1, $zero, 1023
+; LA64F-NEXT:    move $a0, $s0
 ; LA64F-NEXT:    bl %plt(fmax)
-; LA64F-NEXT:    st.d $s4, $sp, 16
+; LA64F-NEXT:    st.d $s0, $sp, 16
 ; LA64F-NEXT:    st.d $a0, $sp, 8
-; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    ori $a0, $zero, 8
+; LA64F-NEXT:    addi.d $a2, $sp, 16
+; LA64F-NEXT:    addi.d $a3, $sp, 8
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s2
-; LA64F-NEXT:    move $a3, $s3
 ; LA64F-NEXT:    move $a4, $zero
 ; LA64F-NEXT:    move $a5, $zero
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64F-NEXT:    ld.d $s4, $sp, 16
+; LA64F-NEXT:    ld.d $s0, $sp, 16
 ; LA64F-NEXT:    beqz $a0, .LBB39_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    move $a0, $s4
-; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    move $a0, $s0
+; LA64F-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 48
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fmax_monotonic:
 ; LA64D:       # %bb.0:
-; LA64D-NEXT:    addi.d $sp, $sp, -64
-; LA64D-NEXT:    st.d $ra, $sp, 56 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $fp, $sp, 48 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s0, $sp, 40 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s1, $sp, 32 # 8-byte Folded Spill
-; LA64D-NEXT:    st.d $s2, $sp, 24 # 8-byte Folded Spill
-; LA64D-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64D-NEXT:    addi.d $sp, $sp, -48
+; LA64D-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
 ; LA64D-NEXT:    movgr2fr.d $fa1, $a0
 ; LA64D-NEXT:    ffint.d.l $fs0, $fa1
-; LA64D-NEXT:    ori $s0, $zero, 8
-; LA64D-NEXT:    addi.d $s1, $sp, 8
-; LA64D-NEXT:    addi.d $s2, $sp, 0
 ; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB39_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fmax.d $fa1, $fa0, $fa0
 ; LA64D-NEXT:    fmax.d $fa1, $fa1, $fs0
-; LA64D-NEXT:    fst.d $fa0, $sp, 8
-; LA64D-NEXT:    fst.d $fa1, $sp, 0
-; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    fst.d $fa0, $sp, 16
+; LA64D-NEXT:    fst.d $fa1, $sp, 8
+; LA64D-NEXT:    ori $a0, $zero, 8
+; LA64D-NEXT:    addi.d $a2, $sp, 16
+; LA64D-NEXT:    addi.d $a3, $sp, 8
 ; LA64D-NEXT:    move $a1, $fp
-; LA64D-NEXT:    move $a2, $s1
-; LA64D-NEXT:    move $a3, $s2
 ; LA64D-NEXT:    move $a4, $zero
 ; LA64D-NEXT:    move $a5, $zero
 ; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
-; LA64D-NEXT:    fld.d $fa0, $sp, 8
+; LA64D-NEXT:    fld.d $fa0, $sp, 16
 ; LA64D-NEXT:    beqz $a0, .LBB39_1
 ; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64D-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s2, $sp, 24 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s1, $sp, 32 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $s0, $sp, 40 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $fp, $sp, 48 # 8-byte Folded Reload
-; LA64D-NEXT:    ld.d $ra, $sp, 56 # 8-byte Folded Reload
-; LA64D-NEXT:    addi.d $sp, $sp, 64
+; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 48
 ; LA64D-NEXT:    ret
   %v = atomicrmw fmax ptr %p, double 1.0 monotonic, align 4
   ret double %v
diff --git a/llvm/test/CodeGen/LoongArch/jump-table.ll b/llvm/test/CodeGen/LoongArch/jump-table.ll
index bb3d49157ac6a..51f1ee6da0b85 100644
--- a/llvm/test/CodeGen/LoongArch/jump-table.ll
+++ b/llvm/test/CodeGen/LoongArch/jump-table.ll
@@ -19,31 +19,31 @@ define void @switch_4_arms(i32 %in, ptr %out) nounwind {
 ; LA32-NEXT:    ori $a2, $zero, 2
 ; LA32-NEXT:    blt $a2, $a0, .LBB0_4
 ; LA32-NEXT:  # %bb.1: # %entry
-; LA32-NEXT:    ori $a3, $zero, 1
-; LA32-NEXT:    beq $a0, $a3, .LBB0_7
+; LA32-NEXT:    ori $a2, $zero, 1
+; LA32-NEXT:    beq $a0, $a2, .LBB0_7
 ; LA32-NEXT:  # %bb.2: # %entry
-; LA32-NEXT:    bne $a0, $a2, .LBB0_9
+; LA32-NEXT:    ori $a2, $zero, 2
+; LA32-NEXT:    bne $a0, $a2, .LBB0_10
 ; LA32-NEXT:  # %bb.3: # %bb2
 ; LA32-NEXT:    ori $a0, $zero, 3
-; LA32-NEXT:    st.w $a0, $a1, 0
-; LA32-NEXT:    ret
+; LA32-NEXT:    b .LBB0_9
 ; LA32-NEXT:  .LBB0_4: # %entry
-; LA32-NEXT:    ori $a3, $zero, 3
-; LA32-NEXT:    beq $a0, $a3, .LBB0_8
+; LA32-NEXT:    ori $a2, $zero, 3
+; LA32-NEXT:    beq $a0, $a2, .LBB0_8
 ; LA32-NEXT:  # %bb.5: # %entry
 ; LA32-NEXT:    ori $a2, $zero, 4
-; LA32-NEXT:    bne $a0, $a2, .LBB0_9
+; LA32-NEXT:    bne $a0, $a2, .LBB0_10
 ; LA32-NEXT:  # %bb.6: # %bb4
 ; LA32-NEXT:    ori $a0, $zero, 1
-; LA32-NEXT:    st.w $a0, $a1, 0
-; LA32-NEXT:    ret
+; LA32-NEXT:    b .LBB0_9
 ; LA32-NEXT:  .LBB0_7: # %bb1
 ; LA32-NEXT:    ori $a0, $zero, 4
-; LA32-NEXT:    st.w $a0, $a1, 0
-; LA32-NEXT:    ret
+; LA32-NEXT:    b .LBB0_9
 ; LA32-NEXT:  .LBB0_8: # %bb3
-; LA32-NEXT:    st.w $a2, $a1, 0
+; LA32-NEXT:    ori $a0, $zero, 2
 ; LA32-NEXT:  .LBB0_9: # %exit
+; LA32-NEXT:    st.w $a0, $a1, 0
+; LA32-NEXT:  .LBB0_10: # %exit
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: switch_4_arms:
@@ -52,80 +52,86 @@ define void @switch_4_arms(i32 %in, ptr %out) nounwind {
 ; LA64-NEXT:    ori $a2, $zero, 2
 ; LA64-NEXT:    blt $a2, $a0, .LBB0_4
 ; LA64-NEXT:  # %bb.1: # %entry
-; LA64-NEXT:    ori $a3, $zero, 1
-; LA64-NEXT:    beq $a0, $a3, .LBB0_7
+; LA64-NEXT:    ori $a2, $zero, 1
+; LA64-NEXT:    beq $a0, $a2, .LBB0_7
 ; LA64-NEXT:  # %bb.2: # %entry
-; LA64-NEXT:    bne $a0, $a2, .LBB0_9
+; LA64-NEXT:    ori $a2, $zero, 2
+; LA64-NEXT:    bne $a0, $a2, .LBB0_10
 ; LA64-NEXT:  # %bb.3: # %bb2
 ; LA64-NEXT:    ori $a0, $zero, 3
-; LA64-NEXT:    st.w $a0, $a1, 0
-; LA64-NEXT:    ret
+; LA64-NEXT:    b .LBB0_9
 ; LA64-NEXT:  .LBB0_4: # %entry
-; LA64-NEXT:    ori $a3, $zero, 3
-; LA64-NEXT:    beq $a0, $a3, .LBB0_8
+; LA64-NEXT:    ori $a2, $zero, 3
+; LA64-NEXT:    beq $a0, $a2, .LBB0_8
 ; LA64-NEXT:  # %bb.5: # %entry
 ; LA64-NEXT:    ori $a2, $zero, 4
-; LA64-NEXT:    bne $a0, $a2, .LBB0_9
+; LA64-NEXT:    bne $a0, $a2, .LBB0_10
 ; LA64-NEXT:  # %bb.6: # %bb4
 ; LA64-NEXT:    ori $a0, $zero, 1
-; LA64-NEXT:    st.w $a0, $a1, 0
-; LA64-NEXT:    ret
+; LA64-NEXT:    b .LBB0_9
 ; LA64-NEXT:  .LBB0_7: # %bb1
 ; LA64-NEXT:    ori $a0, $zero, 4
-; LA64-NEXT:    st.w $a0, $a1, 0
-; LA64-NEXT:    ret
+; LA64-NEXT:    b .LBB0_9
 ; LA64-NEXT:  .LBB0_8: # %bb3
-; LA64-NEXT:    st.w $a2, $a1, 0
+; LA64-NEXT:    ori $a0, $zero, 2
 ; LA64-NEXT:  .LBB0_9: # %exit
+; LA64-NEXT:    st.w $a0, $a1, 0
+; LA64-NEXT:  .LBB0_10: # %exit
 ; LA64-NEXT:    ret
 ;
 ; LA32-JT-LABEL: switch_4_arms:
 ; LA32-JT:       # %bb.0: # %entry
-; LA32-JT-NEXT:    addi.w $a2, $a0, -1
-; LA32-JT-NEXT:    ori $a0, $zero, 3
-; LA32-JT-NEXT:    bltu $a0, $a2, .LBB0_6
+; LA32-JT-NEXT:    addi.w $a0, $a0, -1
+; LA32-JT-NEXT:    ori $a2, $zero, 3
+; LA32-JT-NEXT:    bltu $a2, $a0, .LBB0_7
 ; LA32-JT-NEXT:  # %bb.1: # %entry
-; LA32-JT-NEXT:    pcalau12i $a3, %pc_hi20(.LJTI0_0)
-; LA32-JT-NEXT:    addi.w $a3, $a3, %pc_lo12(.LJTI0_0)
-; LA32-JT-NEXT:    alsl.w $a2, $a2, $a3, 2
-; LA32-JT-NEXT:    ld.w $a2, $a2, 0
-; LA32-JT-NEXT:    jr $a2
+; LA32-JT-NEXT:    pcalau12i $a2, %pc_hi20(.LJTI0_0)
+; LA32-JT-NEXT:    addi.w $a2, $a2, %pc_lo12(.LJTI0_0)
+; LA32-JT-NEXT:    alsl.w $a0, $a0, $a2, 2
+; LA32-JT-NEXT:    ld.w $a0, $a0, 0
+; LA32-JT-NEXT:    jr $a0
 ; LA32-JT-NEXT:  .LBB0_2: # %bb1
 ; LA32-JT-NEXT:    ori $a0, $zero, 4
-; LA32-JT-NEXT:    b .LBB0_5
+; LA32-JT-NEXT:    b .LBB0_6
 ; LA32-JT-NEXT:  .LBB0_3: # %bb3
 ; LA32-JT-NEXT:    ori $a0, $zero, 2
-; LA32-JT-NEXT:    b .LBB0_5
+; LA32-JT-NEXT:    b .LBB0_6
 ; LA32-JT-NEXT:  .LBB0_4: # %bb4
 ; LA32-JT-NEXT:    ori $a0, $zero, 1
-; LA32-JT-NEXT:  .LBB0_5: # %exit
-; LA32-JT-NEXT:    st.w $a0, $a1, 0
+; LA32-JT-NEXT:    b .LBB0_6
+; LA32-JT-NEXT:  .LBB0_5: # %bb2
+; LA32-JT-NEXT:    ori $a0, $zero, 3
 ; LA32-JT-NEXT:  .LBB0_6: # %exit
+; LA32-JT-NEXT:    st.w $a0, $a1, 0
+; LA32-JT-NEXT:  .LBB0_7: # %exit
 ; LA32-JT-NEXT:    ret
 ;
 ; LA64-JT-LABEL: switch_4_arms:
 ; LA64-JT:       # %bb.0: # %entry
 ; LA64-JT-NEXT:    addi.w $a0, $a0, 0
-; LA64-JT-NEXT:    addi.d $a2, $a0, -1
-; LA64-JT-NEXT:    ori $a0, $zero, 3
-; LA64-JT-NEXT:    bltu $a0, $a2, .LBB0_6
+; LA64-JT-NEXT:    addi.d $a0, $a0, -1
+; LA64-JT-NEXT:    ori $a2, $zero, 3
+; LA64-JT-NEXT:    bltu $a2, $a0, .LBB0_7
 ; LA64-JT-NEXT:  # %bb.1: # %entry
-; LA64-JT-NEXT:    slli.d $a2, $a2, 3
-; LA64-JT-NEXT:    pcalau12i $a3, %pc_hi20(.LJTI0_0)
-; LA64-JT-NEXT:    addi.d $a3, $a3, %pc_lo12(.LJTI0_0)
-; LA64-JT-NEXT:    ldx.d $a2, $a2, $a3
-; LA64-JT-NEXT:    jr $a2
+; LA64-JT-NEXT:    slli.d $a0, $a0, 3
+; LA64-JT-NEXT:    pcalau12i $a2, %pc_hi20(.LJTI0_0)
+; LA64-JT-NEXT:    addi.d $a2, $a2, %pc_lo12(.LJTI0_0)
+; LA64-JT-NEXT:    ldx.d $a0, $a0, $a2
+; LA64-JT-NEXT:    jr $a0
 ; LA64-JT-NEXT:  .LBB0_2: # %bb1
 ; LA64-JT-NEXT:    ori $a0, $zero, 4
-; LA64-JT-NEXT:    b .LBB0_5
+; LA64-JT-NEXT:    b .LBB0_6
 ; LA64-JT-NEXT:  .LBB0_3: # %bb3
 ; LA64-JT-NEXT:    ori $a0, $zero, 2
-; LA64-JT-NEXT:    b .LBB0_5
+; LA64-JT-NEXT:    b .LBB0_6
 ; LA64-JT-NEXT:  .LBB0_4: # %bb4
 ; LA64-JT-NEXT:    ori $a0, $zero, 1
-; LA64-JT-NEXT:  .LBB0_5: # %exit
-; LA64-JT-NEXT:    st.w $a0, $a1, 0
+; LA64-JT-NEXT:    b .LBB0_6
+; LA64-JT-NEXT:  .LBB0_5: # %bb2
+; LA64-JT-NEXT:    ori $a0, $zero, 3
 ; LA64-JT-NEXT:  .LBB0_6: # %exit
+; LA64-JT-NEXT:    st.w $a0, $a1, 0
+; LA64-JT-NEXT:  .LBB0_7: # %exit
 ; LA64-JT-NEXT:    ret
 entry:
   switch i32 %in, label %exit [
diff --git a/llvm/test/CodeGen/LoongArch/register-coalescer-crash-pr79718.mir b/llvm/test/CodeGen/LoongArch/register-coalescer-crash-pr79718.mir
index 9c62983c6d450..be62b73b14c3f 100644
--- a/llvm/test/CodeGen/LoongArch/register-coalescer-crash-pr79718.mir
+++ b/llvm/test/CodeGen/LoongArch/register-coalescer-crash-pr79718.mir
@@ -78,7 +78,7 @@ body:             |
   ; CHECK-NEXT:   successors: %bb.11(0x80000000)
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:gpr = ADDI_D [[COPY6]], 1
-  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:gpr = COPY [[ORI]]
+  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:gpr = ORI $r0, 1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.11:
   ; CHECK-NEXT:   successors: %bb.12(0x80000000)
diff --git a/llvm/test/CodeGen/LoongArch/sextw-removal.ll b/llvm/test/CodeGen/LoongArch/sextw-removal.ll
index ba889fc8393b4..2bb39395c1d1b 100644
--- a/llvm/test/CodeGen/LoongArch/sextw-removal.ll
+++ b/llvm/test/CodeGen/LoongArch/sextw-removal.ll
@@ -825,22 +825,22 @@ define signext i32 @test14b(i32 %0, i32 signext %1) {
 ; CHECK-NEXT:    ori $a2, $zero, 2
 ; CHECK-NEXT:    blt $a1, $a2, .LBB14_4
 ; CHECK-NEXT:  # %bb.1: # %.preheader
-; CHECK-NEXT:    ori $a3, $zero, 1
-; CHECK-NEXT:    addi.d $a2, $zero, -1
-; CHECK-NEXT:    ori $a4, $zero, 1000
+; CHECK-NEXT:    ori $a2, $zero, 1
+; CHECK-NEXT:    ori $a3, $zero, 1000
 ; CHECK-NEXT:    .p2align 4, , 16
 ; CHECK-NEXT:  .LBB14_2: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    addi.w $a5, $a0, 0
-; CHECK-NEXT:    blt $a4, $a5, .LBB14_5
+; CHECK-NEXT:    addi.w $a4, $a0, 0
+; CHECK-NEXT:    blt $a3, $a4, .LBB14_5
 ; CHECK-NEXT:  # %bb.3: # in Loop: Header=BB14_2 Depth=1
-; CHECK-NEXT:    add.d $a0, $a3, $a0
-; CHECK-NEXT:    addi.w $a3, $a3, 1
-; CHECK-NEXT:    blt $a3, $a1, .LBB14_2
+; CHECK-NEXT:    add.d $a0, $a2, $a0
+; CHECK-NEXT:    addi.w $a2, $a2, 1
+; CHECK-NEXT:    blt $a2, $a1, .LBB14_2
 ; CHECK-NEXT:  .LBB14_4:
 ; CHECK-NEXT:    addi.w $a0, $a0, 0
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB14_5:
-; CHECK-NEXT:    addi.w $a0, $a2, 0
+; CHECK-NEXT:    addi.d $a0, $zero, -1
+; CHECK-NEXT:    addi.w $a0, $a0, 0
 ; CHECK-NEXT:    ret
 ;
 ; NORMV-LABEL: test14b:
@@ -848,24 +848,24 @@ define signext i32 @test14b(i32 %0, i32 signext %1) {
 ; NORMV-NEXT:    ori $a2, $zero, 2
 ; NORMV-NEXT:    blt $a1, $a2, .LBB14_4
 ; NORMV-NEXT:  # %bb.1: # %.preheader
-; NORMV-NEXT:    ori $a3, $zero, 1
-; NORMV-NEXT:    addi.d $a2, $zero, -1
-; NORMV-NEXT:    ori $a4, $zero, 1000
+; NORMV-NEXT:    ori $a2, $zero, 1
+; NORMV-NEXT:    ori $a3, $zero, 1000
 ; NORMV-NEXT:    .p2align 4, , 16
 ; NORMV-NEXT:  .LBB14_2: # =>This Inner Loop Header: Depth=1
-; NORMV-NEXT:    addi.w $a5, $a0, 0
-; NORMV-NEXT:    blt $a4, $a5, .LBB14_5
+; NORMV-NEXT:    addi.w $a4, $a0, 0
+; NORMV-NEXT:    blt $a3, $a4, .LBB14_5
 ; NORMV-NEXT:  # %bb.3: # in Loop: Header=BB14_2 Depth=1
-; NORMV-NEXT:    add.d $a0, $a3, $a0
-; NORMV-NEXT:    addi.d $a3, $a3, 1
-; NORMV-NEXT:    addi.w $a3, $a3, 0
+; NORMV-NEXT:    add.d $a0, $a2, $a0
+; NORMV-NEXT:    addi.d $a2, $a2, 1
+; NORMV-NEXT:    addi.w $a2, $a2, 0
 ; NORMV-NEXT:    addi.d $a0, $a0, 0
-; NORMV-NEXT:    blt $a3, $a1, .LBB14_2
+; NORMV-NEXT:    blt $a2, $a1, .LBB14_2
 ; NORMV-NEXT:  .LBB14_4:
 ; NORMV-NEXT:    addi.w $a0, $a0, 0
 ; NORMV-NEXT:    ret
 ; NORMV-NEXT:  .LBB14_5:
-; NORMV-NEXT:    addi.w $a0, $a2, 0
+; NORMV-NEXT:    addi.d $a0, $zero, -1
+; NORMV-NEXT:    addi.w $a0, $a0, 0
 ; NORMV-NEXT:    ret
   %3 = icmp sgt i32 %1, 1
   br i1 %3, label %4, label %12
@@ -893,22 +893,22 @@ define signext i32 @test14c(i32 zeroext %0, i32 signext %1) {
 ; CHECK-NEXT:    ori $a2, $zero, 2
 ; CHECK-NEXT:    blt $a1, $a2, .LBB15_4
 ; CHECK-NEXT:  # %bb.1: # %.preheader
-; CHECK-NEXT:    ori $a3, $zero, 1
-; CHECK-NEXT:    addi.d $a2, $zero, -1
-; CHECK-NEXT:    ori $a4, $zero, 1000
+; CHECK-NEXT:    ori $a2, $zero, 1
+; CHECK-NEXT:    ori $a3, $zero, 1000
 ; CHECK-NEXT:    .p2align 4, , 16
 ; CHECK-NEXT:  .LBB15_2: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    addi.w $a5, $a0, 0
-; CHECK-NEXT:    blt $a4, $a5, .LBB15_5
+; CHECK-NEXT:    addi.w $a4, $a0, 0
+; CHECK-NEXT:    blt $a3, $a4, .LBB15_5
 ; CHECK-NEXT:  # %bb.3: # in Loop: Header=BB15_2 Depth=1
-; CHECK-NEXT:    add.d $a0, $a3, $a0
-; CHECK-NEXT:    addi.w $a3, $a3, 1
-; CHECK-NEXT:    blt $a3, $a1, .LBB15_2
+; CHECK-NEXT:    add.d $a0, $a2, $a0
+; CHECK-NEXT:    addi.w $a2, $a2, 1
+; CHECK-NEXT:    blt $a2, $a1, .LBB15_2
 ; CHECK-NEXT:  .LBB15_4:
 ; CHECK-NEXT:    addi.w $a0, $a0, 0
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB15_5:
-; CHECK-NEXT:    addi.w $a0, $a2, 0
+; CHECK-NEXT:    addi.d $a0, $zero, -1
+; CHECK-NEXT:    addi.w $a0, $a0, 0
 ; CHECK-NEXT:    ret
 ;
 ; NORMV-LABEL: test14c:
@@ -916,24 +916,24 @@ define signext i32 @test14c(i32 zeroext %0, i32 signext %1) {
 ; NORMV-NEXT:    ori $a2, $zero, 2
 ; NORMV-NEXT:    blt $a1, $a2, .LBB15_4
 ; NORMV-NEXT:  # %bb.1: # %.preheader
-; NORMV-NEXT:    ori $a3, $zero, 1
-; NORMV-NEXT:    addi.d $a2, $zero, -1
-; NORMV-NEXT:    ori $a4, $zero, 1000
+; NORMV-NEXT:    ori $a2, $zero, 1
+; NORMV-NEXT:    ori $a3, $zero, 1000
 ; NORMV-NEXT:    .p2align 4, , 16
 ; NORMV-NEXT:  .LBB15_2: # =>This Inner Loop Header: Depth=1
-; NORMV-NEXT:    addi.w $a5, $a0, 0
-; NORMV-NEXT:    blt $a4, $a5, .LBB15_5
+; NORMV-NEXT:    addi.w $a4, $a0, 0
+; NORMV-NEXT:    blt $a3, $a4, .LBB15_5
 ; NORMV-NEXT:  # %bb.3: # in Loop: Header=BB15_2 Depth=1
-; NORMV-NEXT:    add.d $a0, $a3, $a0
-; NORMV-NEXT:    addi.d $a3, $a3, 1
-; NORMV-NEXT:    addi.w $a3, $a3, 0
+; NORMV-NEXT:    add.d $a0, $a2, $a0
+; NORMV-NEXT:    addi.d $a2, $a2, 1
+; NORMV-NEXT:    addi.w $a2, $a2, 0
 ; NORMV-NEXT:    addi.d $a0, $a0, 0
-; NORMV-NEXT:    blt $a3, $a1, .LBB15_2
+; NORMV-NEXT:    blt $a2, $a1, .LBB15_2
 ; NORMV-NEXT:  .LBB15_4:
 ; NORMV-NEXT:    addi.w $a0, $a0, 0
 ; NORMV-NEXT:    ret
 ; NORMV-NEXT:  .LBB15_5:
-; NORMV-NEXT:    addi.w $a0, $a2, 0
+; NORMV-NEXT:    addi.d $a0, $zero, -1
+; NORMV-NEXT:    addi.w $a0, $a0, 0
 ; NORMV-NEXT:    ret
   %3 = icmp sgt i32 %1, 1
   br i1 %3, label %4, label %12
diff --git a/llvm/test/CodeGen/LoongArch/vararg.ll b/llvm/test/CodeGen/LoongArch/vararg.ll
index 112459ab1f4d4..bbf3b8e333306 100644
--- a/llvm/test/CodeGen/LoongArch/vararg.ll
+++ b/llvm/test/CodeGen/LoongArch/vararg.ll
@@ -292,14 +292,14 @@ define void @va_aligned_stack_caller() nounwind {
 ; LA64-FPELIM-NEXT:    st.d $zero, $sp, 80
 ; LA64-FPELIM-NEXT:    st.d $zero, $sp, 72
 ; LA64-FPELIM-NEXT:    ori $a5, $zero, 1000
+; LA64-FPELIM-NEXT:    ori $a0, $zero, 1
 ; LA64-FPELIM-NEXT:    ori $a1, $zero, 11
 ; LA64-FPELIM-NEXT:    addi.d $a2, $sp, 64
 ; LA64-FPELIM-NEXT:    ori $a3, $zero, 12
 ; LA64-FPELIM-NEXT:    ori $a4, $zero, 13
-; LA64-FPELIM-NEXT:    ori $a0, $zero, 1
+; LA64-FPELIM-NEXT:    ori $a7, $zero, 1
 ; LA64-FPELIM-NEXT:    st.d $a5, $sp, 64
 ; LA64-FPELIM-NEXT:    move $a6, $zero
-; LA64-FPELIM-NEXT:    move $a7, $a0
 ; LA64-FPELIM-NEXT:    bl %plt(va_aligned_stack_callee)
 ; LA64-FPELIM-NEXT:    ld.d $ra, $sp, 104 # 8-byte Folded Reload
 ; LA64-FPELIM-NEXT:    addi.d $sp, $sp, 112
@@ -333,14 +333,14 @@ define void @va_aligned_stack_caller() nounwind {
 ; LA64-WITHFP-NEXT:    st.d $zero, $fp, -32
 ; LA64-WITHFP-NEXT:    st.d $zero, $fp, -40
 ; LA64-WITHFP-NEXT:    ori $a5, $zero, 1000
+; LA64-WITHFP-NEXT:    ori $a0, $zero, 1
 ; LA64-WITHFP-NEXT:    ori $a1, $zero, 11
 ; LA64-WITHFP-NEXT:    addi.d $a2, $fp, -48
 ; LA64-WITHFP-NEXT:    ori $a3, $zero, 12
 ; LA64-WITHFP-NEXT:    ori $a4, $zero, 13
-; LA64-WITHFP-NEXT:    ori $a0, $zero, 1
+; LA64-WITHFP-NEXT:    ori $a7, $zero, 1
 ; LA64-WITHFP-NEXT:    st.d $a5, $fp, -48
 ; LA64-WITHFP-NEXT:    move $a6, $zero
-; LA64-WITHFP-NEXT:    move $a7, $a0
 ; LA64-WITHFP-NEXT:    bl %plt(va_aligned_stack_callee)
 ; LA64-WITHFP-NEXT:    ld.d $fp, $sp, 96 # 8-byte Folded Reload
 ; LA64-WITHFP-NEXT:    ld.d $ra, $sp, 104 # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/LoongArch/vector-fp-imm.ll b/llvm/test/CodeGen/LoongArch/vector-fp-imm.ll
index 0a401ebe5f6b2..531a56dbe814e 100644
--- a/llvm/test/CodeGen/LoongArch/vector-fp-imm.ll
+++ b/llvm/test/CodeGen/LoongArch/vector-fp-imm.ll
@@ -558,15 +558,15 @@ define void @test_d4(ptr %P, ptr %S) nounwind {
 ; LA32F-NEXT:    st.w $s5, $sp, 16 # 4-byte Folded Spill
 ; LA32F-NEXT:    st.w $s6, $sp, 12 # 4-byte Folded Spill
 ; LA32F-NEXT:    st.w $s7, $sp, 8 # 4-byte Folded Spill
-; LA32F-NEXT:    ld.w $fp, $a0, 24
-; LA32F-NEXT:    ld.w $s0, $a0, 28
-; LA32F-NEXT:    ld.w $s1, $a0, 16
-; LA32F-NEXT:    ld.w $s2, $a0, 20
-; LA32F-NEXT:    ld.w $s3, $a0, 8
-; LA32F-NEXT:    ld.w $s4, $a0, 12
+; LA32F-NEXT:    ld.w $s0, $a0, 24
+; LA32F-NEXT:    ld.w $s1, $a0, 28
+; LA32F-NEXT:    ld.w $s2, $a0, 16
+; LA32F-NEXT:    ld.w $s3, $a0, 20
+; LA32F-NEXT:    ld.w $s4, $a0, 8
+; LA32F-NEXT:    ld.w $s5, $a0, 12
 ; LA32F-NEXT:    ld.w $a2, $a0, 0
 ; LA32F-NEXT:    ld.w $a4, $a0, 4
-; LA32F-NEXT:    move $s5, $a1
+; LA32F-NEXT:    move $fp, $a1
 ; LA32F-NEXT:    lu12i.w $a3, 261888
 ; LA32F-NEXT:    move $a0, $a2
 ; LA32F-NEXT:    move $a1, $a4
@@ -575,32 +575,32 @@ define void @test_d4(ptr %P, ptr %S) nounwind {
 ; LA32F-NEXT:    move $s6, $a0
 ; LA32F-NEXT:    move $s7, $a1
 ; LA32F-NEXT:    lu12i.w $a3, 262144
-; LA32F-NEXT:    move $a0, $s3
-; LA32F-NEXT:    move $a1, $s4
+; LA32F-NEXT:    move $a0, $s4
+; LA32F-NEXT:    move $a1, $s5
 ; LA32F-NEXT:    move $a2, $zero
 ; LA32F-NEXT:    bl %plt(__adddf3)
-; LA32F-NEXT:    move $s3, $a0
-; LA32F-NEXT:    move $s4, $a1
+; LA32F-NEXT:    move $s4, $a0
+; LA32F-NEXT:    move $s5, $a1
 ; LA32F-NEXT:    lu12i.w $a3, 262272
-; LA32F-NEXT:    move $a0, $s1
-; LA32F-NEXT:    move $a1, $s2
+; LA32F-NEXT:    move $a0, $s2
+; LA32F-NEXT:    move $a1, $s3
 ; LA32F-NEXT:    move $a2, $zero
 ; LA32F-NEXT:    bl %plt(__adddf3)
-; LA32F-NEXT:    move $s1, $a0
-; LA32F-NEXT:    move $s2, $a1
+; LA32F-NEXT:    move $s2, $a0
+; LA32F-NEXT:    move $s3, $a1
 ; LA32F-NEXT:    lu12i.w $a3, 262400
-; LA32F-NEXT:    move $a0, $fp
-; LA32F-NEXT:    move $a1, $s0
+; LA32F-NEXT:    move $a0, $s0
+; LA32F-NEXT:    move $a1, $s1
 ; LA32F-NEXT:    move $a2, $zero
 ; LA32F-NEXT:    bl %plt(__adddf3)
-; LA32F-NEXT:    st.w $a0, $s5, 24
-; LA32F-NEXT:    st.w $a1, $s5, 28
-; LA32F-NEXT:    st.w $s1, $s5, 16
-; LA32F-NEXT:    st.w $s2, $s5, 20
-; LA32F-NEXT:    st.w $s3, $s5, 8
-; LA32F-NEXT:    st.w $s4, $s5, 12
-; LA32F-NEXT:    st.w $s6, $s5, 0
-; LA32F-NEXT:    st.w $s7, $s5, 4
+; LA32F-NEXT:    st.w $a0, $fp, 24
+; LA32F-NEXT:    st.w $a1, $fp, 28
+; LA32F-NEXT:    st.w $s2, $fp, 16
+; LA32F-NEXT:    st.w $s3, $fp, 20
+; LA32F-NEXT:    st.w $s4, $fp, 8
+; LA32F-NEXT:    st.w $s5, $fp, 12
+; LA32F-NEXT:    st.w $s6, $fp, 0
+; LA32F-NEXT:    st.w $s7, $fp, 4
 ; LA32F-NEXT:    ld.w $s7, $sp, 8 # 4-byte Folded Reload
 ; LA32F-NEXT:    ld.w $s6, $sp, 12 # 4-byte Folded Reload
 ; LA32F-NEXT:    ld.w $s5, $sp, 16 # 4-byte Folded Reload
@@ -767,15 +767,13 @@ define void @test_d8(ptr %P, ptr %S) nounwind {
 ; LA32F-NEXT:    move $a0, $s7
 ; LA32F-NEXT:    move $a1, $s0
 ; LA32F-NEXT:    move $a2, $zero
-; LA32F-NEXT:    move $s0, $a3
 ; LA32F-NEXT:    bl %plt(__adddf3)
 ; LA32F-NEXT:    st.w $a0, $sp, 24 # 4-byte Folded Spill
 ; LA32F-NEXT:    st.w $a1, $sp, 20 # 4-byte Folded Spill
-; LA32F-NEXT:    lu12i.w $s7, 262272
+; LA32F-NEXT:    lu12i.w $a3, 262272
 ; LA32F-NEXT:    move $a0, $s1
 ; LA32F-NEXT:    move $a1, $s2
 ; LA32F-NEXT:    move $a2, $zero
-; LA32F-NEXT:    move $a3, $s7
 ; LA32F-NEXT:    bl %plt(__adddf3)
 ; LA32F-NEXT:    st.w $a0, $sp, 12 # 4-byte Folded Spill
 ; LA32F-NEXT:    move $s2, $a1
@@ -786,40 +784,40 @@ define void @test_d8(ptr %P, ptr %S) nounwind {
 ; LA32F-NEXT:    bl %plt(__adddf3)
 ; LA32F-NEXT:    move $s5, $a0
 ; LA32F-NEXT:    move $s6, $a1
+; LA32F-NEXT:    lu12i.w $a3, 261888
 ; LA32F-NEXT:    move $a0, $s3
 ; LA32F-NEXT:    move $a1, $s4
 ; LA32F-NEXT:    move $a2, $zero
-; LA32F-NEXT:    lu12i.w $a3, 261888
 ; LA32F-NEXT:    bl %plt(__adddf3)
-; LA32F-NEXT:    move $s3, $a0
-; LA32F-NEXT:    move $s4, $a1
+; LA32F-NEXT:    move $s0, $a0
+; LA32F-NEXT:    move $s3, $a1
+; LA32F-NEXT:    lu12i.w $a3, 262144
 ; LA32F-NEXT:    move $a0, $s8
 ; LA32F-NEXT:    ld.w $a1, $sp, 16 # 4-byte Folded Reload
 ; LA32F-NEXT:    move $a2, $zero
-; LA32F-NEXT:    move $a3, $s0
 ; LA32F-NEXT:    bl %plt(__adddf3)
-; LA32F-NEXT:    move $s8, $a0
-; LA32F-NEXT:    move $s0, $a1
+; LA32F-NEXT:    move $s4, $a0
+; LA32F-NEXT:    move $s8, $a1
+; LA32F-NEXT:    lu12i.w $a3, 262272
 ; LA32F-NEXT:    ld.w $a0, $sp, 32 # 4-byte Folded Reload
 ; LA32F-NEXT:    ld.w $a1, $sp, 28 # 4-byte Folded Reload
 ; LA32F-NEXT:    move $a2, $zero
-; LA32F-NEXT:    move $a3, $s7
 ; LA32F-NEXT:    bl %plt(__adddf3)
 ; LA32F-NEXT:    move $s7, $a0
 ; LA32F-NEXT:    move $s1, $a1
+; LA32F-NEXT:    lu12i.w $a3, 262400
 ; LA32F-NEXT:    ld.w $a0, $sp, 48 # 4-byte Folded Reload
 ; LA32F-NEXT:    ld.w $a1, $sp, 44 # 4-byte Folded Reload
 ; LA32F-NEXT:    move $a2, $zero
-; LA32F-NEXT:    lu12i.w $a3, 262400
 ; LA32F-NEXT:    bl %plt(__adddf3)
 ; LA32F-NEXT:    st.w $a0, $fp, 56
 ; LA32F-NEXT:    st.w $a1, $fp, 60
 ; LA32F-NEXT:    st.w $s7, $fp, 48
 ; LA32F-NEXT:    st.w $s1, $fp, 52
-; LA32F-NEXT:    st.w $s8, $fp, 40
-; LA32F-NEXT:    st.w $s0, $fp, 44
-; LA32F-NEXT:    st.w $s3, $fp, 32
-; LA32F-NEXT:    st.w $s4, $fp, 36
+; LA32F-NEXT:    st.w $s4, $fp, 40
+; LA32F-NEXT:    st.w $s8, $fp, 44
+; LA32F-NEXT:    st.w $s0, $fp, 32
+; LA32F-NEXT:    st.w $s3, $fp, 36
 ; LA32F-NEXT:    st.w $s5, $fp, 24
 ; LA32F-NEXT:    st.w $s6, $fp, 28
 ; LA32F-NEXT:    ld.w $a0, $sp, 12 # 4-byte Folded Reload
@@ -890,85 +888,80 @@ define void @test_d8(ptr %P, ptr %S) nounwind {
 ;
 ; LA64F-LABEL: test_d8:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -112
-; LA64F-NEXT:    st.d $ra, $sp, 104 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 96 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 88 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 80 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 72 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 64 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s4, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s5, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s6, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s7, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s8, $sp, 24 # 8-byte Folded Spill
-; LA64F-NEXT:    ld.d $a2, $a0, 56
-; LA64F-NEXT:    st.d $a2, $sp, 16 # 8-byte Folded Spill
-; LA64F-NEXT:    ld.d $s1, $a0, 40
-; LA64F-NEXT:    ld.d $s2, $a0, 32
-; LA64F-NEXT:    ld.d $s3, $a0, 24
-; LA64F-NEXT:    ld.d $s4, $a0, 8
-; LA64F-NEXT:    ld.d $s5, $a0, 0
-; LA64F-NEXT:    ld.d $s6, $a0, 48
+; LA64F-NEXT:    addi.d $sp, $sp, -96
+; LA64F-NEXT:    st.d $ra, $sp, 88 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 80 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 72 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 64 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s2, $sp, 56 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s3, $sp, 48 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s4, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s5, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s6, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s7, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s8, $sp, 8 # 8-byte Folded Spill
+; LA64F-NEXT:    ld.d $s0, $a0, 56
+; LA64F-NEXT:    ld.d $s2, $a0, 40
+; LA64F-NEXT:    ld.d $s3, $a0, 32
+; LA64F-NEXT:    ld.d $s4, $a0, 24
+; LA64F-NEXT:    ld.d $s5, $a0, 8
+; LA64F-NEXT:    ld.d $s6, $a0, 0
+; LA64F-NEXT:    ld.d $s7, $a0, 48
 ; LA64F-NEXT:    ld.d $a0, $a0, 16
 ; LA64F-NEXT:    move $fp, $a1
 ; LA64F-NEXT:    ori $a1, $zero, 0
 ; LA64F-NEXT:    lu32i.d $a1, -524288
-; LA64F-NEXT:    lu52i.d $s7, $a1, 1024
-; LA64F-NEXT:    move $a1, $s7
+; LA64F-NEXT:    lu52i.d $s8, $a1, 1024
+; LA64F-NEXT:    move $a1, $s8
+; LA64F-NEXT:    bl %plt(__adddf3)
+; LA64F-NEXT:    move $s1, $a0
+; LA64F-NEXT:    move $a0, $s7
+; LA64F-NEXT:    move $a1, $s8
 ; LA64F-NEXT:    bl %plt(__adddf3)
-; LA64F-NEXT:    st.d $a0, $sp, 8 # 8-byte Folded Spill
+; LA64F-NEXT:    move $s7, $a0
+; LA64F-NEXT:    lu52i.d $a1, $zero, 1023
 ; LA64F-NEXT:    move $a0, $s6
-; LA64F-NEXT:    move $a1, $s7
 ; LA64F-NEXT:    bl %plt(__adddf3)
 ; LA64F-NEXT:    move $s6, $a0
-; LA64F-NEXT:    lu52i.d $s7, $zero, 1023
+; LA64F-NEXT:    lu52i.d $a1, $zero, 1024
 ; LA64F-NEXT:    move $a0, $s5
-; LA64F-NEXT:    move $a1, $s7
 ; LA64F-NEXT:    bl %plt(__adddf3)
 ; LA64F-NEXT:    move $s5, $a0
-; LA64F-NEXT:    lu52i.d $s0, $zero, 1024
+; LA64F-NEXT:    lu52i.d $a1, $zero, 1025
 ; LA64F-NEXT:    move $a0, $s4
-; LA64F-NEXT:    move $a1, $s0
 ; LA64F-NEXT:    bl %plt(__adddf3)
 ; LA64F-NEXT:    move $s4, $a0
-; LA64F-NEXT:    lu52i.d $s8, $zero, 1025
+; LA64F-NEXT:    lu52i.d $a1, $zero, 1023
 ; LA64F-NEXT:    move $a0, $s3
-; LA64F-NEXT:    move $a1, $s8
 ; LA64F-NEXT:    bl %plt(__adddf3)
 ; LA64F-NEXT:    move $s3, $a0
+; LA64F-NEXT:    lu52i.d $a1, $zero, 1024
 ; LA64F-NEXT:    move $a0, $s2
-; LA64F-NEXT:    move $a1, $s7
 ; LA64F-NEXT:    bl %plt(__adddf3)
 ; LA64F-NEXT:    move $s2, $a0
-; LA64F-NEXT:    move $a0, $s1
-; LA64F-NEXT:    move $a1, $s0
-; LA64F-NEXT:    bl %plt(__adddf3)
-; LA64F-NEXT:    move $s0, $a0
-; LA64F-NEXT:    ld.d $a0, $sp, 16 # 8-byte Folded Reload
-; LA64F-NEXT:    move $a1, $s8
+; LA64F-NEXT:    lu52i.d $a1, $zero, 1025
+; LA64F-NEXT:    move $a0, $s0
 ; LA64F-NEXT:    bl %plt(__adddf3)
 ; LA64F-NEXT:    st.d $a0, $fp, 56
-; LA64F-NEXT:    st.d $s0, $fp, 40
-; LA64F-NEXT:    st.d $s2, $fp, 32
-; LA64F-NEXT:    st.d $s3, $fp, 24
-; LA64F-NEXT:    st.d $s4, $fp, 8
-; LA64F-NEXT:    st.d $s5, $fp, 0
-; LA64F-NEXT:    st.d $s6, $fp, 48
-; LA64F-NEXT:    ld.d $a0, $sp, 8 # 8-byte Folded Reload
-; LA64F-NEXT:    st.d $a0, $fp, 16
-; LA64F-NEXT:    ld.d $s8, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s7, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s6, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s5, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s4, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s3, $sp, 64 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 72 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 80 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 88 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 96 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 104 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 112
+; LA64F-NEXT:    st.d $s2, $fp, 40
+; LA64F-NEXT:    st.d $s3, $fp, 32
+; LA64F-NEXT:    st.d $s4, $fp, 24
+; LA64F-NEXT:    st.d $s5, $fp, 8
+; LA64F-NEXT:    st.d $s6, $fp, 0
+; LA64F-NEXT:    st.d $s7, $fp, 48
+; LA64F-NEXT:    st.d $s1, $fp, 16
+; LA64F-NEXT:    ld.d $s8, $sp, 8 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s7, $sp, 16 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s6, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s5, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s4, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s3, $sp, 48 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s2, $sp, 56 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s1, $sp, 64 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 72 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 80 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 88 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 96
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: test_d8: