[llvm] f9e0845 - [LoongArch] Explicitly specify instruction properties

Weining Lu via llvm-commits llvm-commits at lists.llvm.org
Mon Jul 10 17:45:55 PDT 2023


Author: Wang Rui
Date: 2023-07-11T08:45:24+08:00
New Revision: f9e0845ef2f64f656e40fe84ee372590d0b8cb3b

URL: https://github.com/llvm/llvm-project/commit/f9e0845ef2f64f656e40fe84ee372590d0b8cb3b
DIFF: https://github.com/llvm/llvm-project/commit/f9e0845ef2f64f656e40fe84ee372590d0b8cb3b.diff

LOG: [LoongArch] Explicitly specify instruction properties

This revision explicitly specifies the machine instruction properties instead of relying on guesswork. This is because guessing instruction properties has proven to be inaccurate; for example, it prevents machine LICM from working:

```
void func(char *a, char *b)
{
    int i;

    for (i = 0; i != 72526; i++)
        a[i] = b[i];
}
```

Guessing instruction properties:

```
func:                                   # @func
        move    $a2, $zero
.LBB0_1:                                # =>This Inner Loop Header: Depth=1
        ldx.b   $a3, $a1, $a2
        stx.b   $a3, $a0, $a2
        addi.d  $a2, $a2, 1
        lu12i.w $a3, 17
        ori     $a3, $a3, 2894
        bne     $a2, $a3, .LBB0_1
        ret
.Lfunc_end0:
```

Explicitly specify instruction properties:

```
func:                                   # @func
        lu12i.w $a2, 17
        ori     $a2, $a2, 2894
        move    $a3, $zero
.LBB0_1:                                # =>This Inner Loop Header: Depth=1
        ldx.b   $a4, $a1, $a3
        stx.b   $a4, $a0, $a3
        addi.d  $a3, $a3, 1
        bne     $a3, $a2, .LBB0_1
        ret
.Lfunc_end0:
```

Reviewed By: SixWeining, xen0n

Differential Revision: https://reviews.llvm.org/D154192

Added: 
    

Modified: 
    llvm/lib/Target/LoongArch/LoongArch.td
    llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
    llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
    llvm/lib/Target/LoongArch/LoongArchFloatInstrFormats.td
    llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
    llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-dbl.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/LoongArch/LoongArch.td b/llvm/lib/Target/LoongArch/LoongArch.td
index 6f4af3dc20b008..7241a5d63526fc 100644
--- a/llvm/lib/Target/LoongArch/LoongArch.td
+++ b/llvm/lib/Target/LoongArch/LoongArch.td
@@ -132,9 +132,7 @@ def : ProcessorModel<"la464", NoSchedModel, [Feature64Bit,
 //===----------------------------------------------------------------------===//
 
 def LoongArchInstrInfo : InstrInfo {
-  // guess mayLoad, mayStore, and hasSideEffects
-  // This option is a temporary migration help. It will go away.
-  let guessInstructionProperties = 1;
+  let guessInstructionProperties = 0;
 }
 
 def LoongArchAsmParser : AsmParser {

diff --git a/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
index 799029740f2c8e..b3d42f412f5e12 100644
--- a/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
@@ -98,8 +98,10 @@ def FSEL_S : FP_SEL<0b00001101000000, "fsel", FPR32>;
 def FMOV_S     : FP_MOV<0b0000000100010100100101, "fmov.s", FPR32, FPR32>;
 def MOVGR2FR_W : FP_MOV<0b0000000100010100101001, "movgr2fr.w", FPR32, GPR>;
 def MOVFR2GR_S : FP_MOV<0b0000000100010100101101, "movfr2gr.s", GPR, FPR32>;
+let hasSideEffects = 1 in {
 def MOVGR2FCSR : FP_MOV<0b0000000100010100110000, "movgr2fcsr", FCSR, GPR>;
 def MOVFCSR2GR : FP_MOV<0b0000000100010100110010, "movfcsr2gr", GPR, FCSR>;
+} // hasSideEffects = 1
 def MOVFR2CF_S : FP_MOV<0b0000000100010100110100, "movfr2cf", CFR, FPR32>;
 def MOVCF2FR_S : FP_MOV<0b0000000100010100110101, "movcf2fr", FPR32, CFR>;
 def MOVGR2CF   : FP_MOV<0b0000000100010100110110, "movgr2cf", CFR, GPR>;

diff --git a/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
index 5b24f3d8866a02..1a2d13dadd20f7 100644
--- a/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
@@ -105,11 +105,11 @@ let isCodeGenOnly = 1 in {
 def MOVFR2GR_S_64 : FP_MOV<0b0000000100010100101101, "movfr2gr.s", GPR, FPR64>;
 def FSEL_D : FP_SEL<0b00001101000000, "fsel", FPR64>;
 } // isCodeGenOnly = 1
-let Constraints = "$dst = $out" in {
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Constraints = "$dst = $out" in {
 def MOVGR2FRH_W : FPFmtMOV<0b0000000100010100101011, (outs FPR64:$out),
                            (ins FPR64:$dst, GPR:$src), "movgr2frh.w",
                            "$dst, $src">;
-} // Constraints = "$dst = $out"
+} // hasSideEffects = 0, mayLoad = 0, mayStore = 0, Constraints = "$dst = $out"
 
 // Common Memory Access Instructions
 def FLD_D : FP_LOAD_2RI12<0b0010101110, "fld.d", FPR64>;

diff --git a/llvm/lib/Target/LoongArch/LoongArchFloatInstrFormats.td b/llvm/lib/Target/LoongArch/LoongArchFloatInstrFormats.td
index 598a5f55c86c44..b14c1882e76277 100644
--- a/llvm/lib/Target/LoongArch/LoongArchFloatInstrFormats.td
+++ b/llvm/lib/Target/LoongArch/LoongArchFloatInstrFormats.td
@@ -157,6 +157,7 @@ class FPFmtMEM<bits<17> op, dag outs, dag ins, string opcstr, string opnstr,
 // Instruction class templates
 //===----------------------------------------------------------------------===//
 
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
 class FP_ALU_2R<bits<22> op, string opstr, RegisterClass rc>
     : FPFmt2R<op, (outs rc:$fd), (ins rc:$fj), opstr, "$fd, $fj">;
 
@@ -166,6 +167,7 @@ class FP_ALU_3R<bits<17> op, string opstr, RegisterClass rc>
 class FP_ALU_4R<bits<12> op, string opstr, RegisterClass rc>
     : FPFmt4R<op, (outs rc:$fd), (ins rc:$fj, rc:$fk, rc:$fa), opstr,
               "$fd, $fj, $fk, $fa">;
+} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
 
 class FPCMPOpc<bits<12> value> {
   bits<12> val = value;
@@ -175,6 +177,7 @@ class FPCMPCond<bits<5> value> {
   bits<5> val = value;
 }
 
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
 class FP_CMP<FPCMPOpc op, FPCMPCond cond, string opstr, RegisterClass rc>
     : FPFmtFCMP<op.val, cond.val, (outs CFR:$cd), (ins rc:$fj, rc:$fk), opstr,
                 "$cd, $fj, $fk">;
@@ -195,24 +198,25 @@ class FP_BRANCH<bits<8> opcode, string opstr>
   let isBranch = 1;
   let isTerminator = 1;
 }
+} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
 
-let mayLoad = 1 in {
+let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
 class FP_LOAD_3R<bits<17> op, string opstr, RegisterClass rc>
     : FPFmtMEM<op, (outs rc:$fd), (ins GPR:$rj, GPR:$rk), opstr,
                "$fd, $rj, $rk">;
 class FP_LOAD_2RI12<bits<10> op, string opstr, RegisterClass rc>
     : FPFmt2RI12<op, (outs rc:$fd), (ins GPR:$rj, simm12:$imm12), opstr,
                  "$fd, $rj, $imm12">;
-} // mayLoad = 1
+} // hasSideEffects = 0, mayLoad = 1, mayStore = 0
 
-let mayStore = 1 in {
+let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
 class FP_STORE_3R<bits<17> op, string opstr, RegisterClass rc>
     : FPFmtMEM<op, (outs), (ins rc:$fd, GPR:$rj, GPR:$rk), opstr,
                "$fd, $rj, $rk">;
 class FP_STORE_2RI12<bits<10> op, string opstr, RegisterClass rc>
     : FPFmt2RI12<op, (outs), (ins rc:$fd, GPR:$rj, simm12:$imm12), opstr,
                  "$fd, $rj, $imm12">;
-} // mayStore = 1
+} // hasSideEffects = 0, mayLoad = 0, mayStore = 1
 
 def FPCMP_OPC_S : FPCMPOpc<0b000011000001>;
 def FPCMP_OPC_D : FPCMPOpc<0b000011000010>;

diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
index e530eb1265ffb6..1517d0c0a3dc9c 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -532,6 +532,7 @@ include "LoongArchLASXInstrFormats.td"
 // Instruction Class Templates
 //===----------------------------------------------------------------------===//
 
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
 class ALU_3R<bits<17> op, string opstr>
     : Fmt3R<op, (outs GPR:$rd), (ins GPR:$rj, GPR:$rk), opstr, "$rd, $rj, $rk">;
 class ALU_2R<bits<22> op, string opstr>
@@ -557,13 +558,17 @@ class ALU_2RI16<bits<6> op, string opstr, Operand ImmOpnd>
                "$rd, $rj, $imm16">;
 class ALU_1RI20<bits<7> op, string opstr, Operand ImmOpnd>
     : Fmt1RI20<op, (outs GPR:$rd), (ins ImmOpnd:$imm20), opstr, "$rd, $imm20">;
+} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
 
+let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
 class MISC_I15<bits<17> op, string opstr>
     : FmtI15<op, (outs), (ins uimm15:$imm15), opstr, "$imm15">;
 
+let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
 class RDTIME_2R<bits<22> op, string opstr>
     : Fmt2R<op, (outs GPR:$rd, GPR:$rj), (ins), opstr, "$rd, $rj">;
 
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
 class BrCC_2RI16<bits<6> op, string opstr>
     : Fmt2RI16<op, (outs), (ins GPR:$rj, GPR:$rd, simm16_lsl2_br:$imm16), opstr,
                "$rj, $rd, $imm16"> {
@@ -581,8 +586,9 @@ class Br_I26<bits<6> op, string opstr>
   let isBranch = 1;
   let isTerminator = 1;
 }
+} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
 
-let mayLoad = 1 in {
+let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
 class LOAD_3R<bits<17> op, string opstr>
     : Fmt3R<op, (outs GPR:$rd), (ins GPR:$rj, GPR:$rk), opstr, "$rd, $rj, $rk">;
 class LOAD_2RI12<bits<10> op, string opstr>
@@ -591,9 +597,9 @@ class LOAD_2RI12<bits<10> op, string opstr>
 class LOAD_2RI14<bits<8> op, string opstr>
     : Fmt2RI14<op, (outs GPR:$rd), (ins GPR:$rj, simm14_lsl2:$imm14), opstr,
                "$rd, $rj, $imm14">;
-} // mayLoad = 1
+} // hasSideEffects = 0, mayLoad = 1, mayStore = 0
 
-let mayStore = 1 in {
+let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
 class STORE_3R<bits<17> op, string opstr>
     : Fmt3R<op, (outs), (ins GPR:$rd, GPR:$rj, GPR:$rk), opstr,
             "$rd, $rj, $rk">;
@@ -603,26 +609,28 @@ class STORE_2RI12<bits<10> op, string opstr>
 class STORE_2RI14<bits<8> op, string opstr>
     : Fmt2RI14<op, (outs), (ins GPR:$rd, GPR:$rj, simm14_lsl2:$imm14), opstr,
                "$rd, $rj, $imm14">;
-} // mayStore = 1
+} // hasSideEffects = 0, mayLoad = 0, mayStore = 1
 
-let mayLoad = 1, mayStore = 1, Constraints = "@earlyclobber $rd" in
+let hasSideEffects = 0, mayLoad = 1, mayStore = 1, Constraints = "@earlyclobber $rd" in
 class AM_3R<bits<17> op, string opstr>
     : Fmt3R<op, (outs GPR:$rd), (ins GPR:$rk, GPRMemAtomic:$rj), opstr,
             "$rd, $rk, $rj">;
 
-let mayLoad = 1 in
+let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in
 class LLBase<bits<8> op, string opstr>
     : Fmt2RI14<op, (outs GPR:$rd), (ins GPR:$rj, simm14_lsl2:$imm14), opstr,
                "$rd, $rj, $imm14">;
 
-let mayStore = 1, Constraints = "$rd = $dst" in
+let hasSideEffects = 0, mayLoad = 0, mayStore = 1, Constraints = "$rd = $dst" in
 class SCBase<bits<8> op, string opstr>
     : Fmt2RI14<op, (outs GPR:$dst), (ins GPR:$rd, GPR:$rj, simm14_lsl2:$imm14),
                opstr, "$rd, $rj, $imm14">;
 
+let hasSideEffects = 1 in
 class IOCSRRD<bits<22> op, string opstr>
     : Fmt2R<op, (outs GPR:$rd), (ins GPR:$rj), opstr, "$rd, $rj">;
 
+let hasSideEffects = 1 in
 class IOCSRWR<bits<22> op, string opstr>
     : Fmt2R<op, (outs), (ins GPR:$rd, GPR:$rj), opstr, "$rd, $rj">;
 
@@ -706,8 +714,9 @@ def BEQZ : BrCCZ_1RI21<0b010000, "beqz">;
 def BNEZ : BrCCZ_1RI21<0b010001, "bnez">;
 def B : Br_I26<0b010100, "b">;
 
-let isCall = 1, Defs=[R1] in
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCall = 1, Defs=[R1] in
 def BL : FmtI26<0b010101, (outs), (ins simm26_symbol:$imm26), "bl", "$imm26">;
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
 def JIRL : Fmt2RI16<0b010011, (outs GPR:$rd),
                     (ins GPR:$rj, simm16_lsl2:$imm16), "jirl",
                     "$rd, $rj, $imm16">;
@@ -721,6 +730,7 @@ def LD_HU : LOAD_2RI12<0b0010101001, "ld.hu">;
 def ST_B : STORE_2RI12<0b0010100100, "st.b">;
 def ST_H : STORE_2RI12<0b0010100101, "st.h">;
 def ST_W : STORE_2RI12<0b0010100110, "st.w">;
+let hasSideEffects = 0, mayLoad = 1, mayStore = 1 in
 def PRELD : FmtPRELD<(outs), (ins uimm5:$imm5, GPR:$rj, simm12:$imm12), "preld",
                      "$imm5, $rj, $imm12">;
 
@@ -755,6 +765,7 @@ def ADDU16I_D : ALU_2RI16<0b000100, "addu16i.d", simm16>;
 def ALSL_WU : ALU_3RI2<0b000000000000011, "alsl.wu", uimm2_plus1>;
 def ALSL_D  : ALU_3RI2<0b000000000010110, "alsl.d", uimm2_plus1>;
 let Constraints = "$rd = $dst" in {
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
 def LU32I_D : Fmt1RI20<0b0001011, (outs GPR:$dst),
                        (ins GPR:$rd, simm20_lu32id:$imm20), "lu32i.d",
                        "$rd, $imm20">;
@@ -824,6 +835,7 @@ def LDPTR_W : LOAD_2RI14<0b00100100, "ldptr.w">;
 def LDPTR_D : LOAD_2RI14<0b00100110, "ldptr.d">;
 def STPTR_W : STORE_2RI14<0b00100101, "stptr.w">;
 def STPTR_D : STORE_2RI14<0b00100111, "stptr.d">;
+let hasSideEffects = 0, mayLoad = 1, mayStore = 1 in
 def PRELDX : FmtPRELDX<(outs), (ins uimm5:$imm5, GPR:$rj, GPR:$rk), "preldx",
                        "$imm5, $rj, $rk">;
 
@@ -1321,7 +1333,7 @@ def PseudoCALLIndirect : Pseudo<(outs), (ins GPR:$rj),
                                 [(loongarch_call GPR:$rj)]>,
                          PseudoInstExpansion<(JIRL R1, GPR:$rj, 0)>;
 
-let isCall = 1, Defs = [R1] in
+let isCall = 1, hasSideEffects = 0, mayStore = 0, mayLoad = 0, Defs = [R1] in
 def PseudoJIRL_CALL : Pseudo<(outs), (ins GPR:$rj, simm16_lsl2:$imm16)>,
                       PseudoInstExpansion<(JIRL R1, GPR:$rj,
                                            simm16_lsl2:$imm16)>;
@@ -1343,11 +1355,13 @@ def PseudoTAILIndirect : Pseudo<(outs), (ins GPRT:$rj),
                                 [(loongarch_tail GPRT:$rj)]>,
                          PseudoInstExpansion<(JIRL R0, GPR:$rj, 0)>;
 
-let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [R3] in
+let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
+    hasSideEffects = 0, mayStore = 0, mayLoad = 0, Uses = [R3] in
 def PseudoB_TAIL : Pseudo<(outs), (ins simm26_b:$imm26)>,
                    PseudoInstExpansion<(B simm26_b:$imm26)>;
 
-let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [R3] in
+let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
+    hasSideEffects = 0, mayStore = 0, mayLoad = 0, Uses = [R3] in
 def PseudoJIRL_TAIL : Pseudo<(outs), (ins GPR:$rj, simm16_lsl2:$imm16)>,
                       PseudoInstExpansion<(JIRL R0, GPR:$rj,
                                            simm16_lsl2:$imm16)>;
@@ -1934,15 +1948,16 @@ let Predicates = [HasBasicF], usesCustomInserter = 1 in {
 //===----------------------------------------------------------------------===//
 
 // CSR Access Instructions
+let hasSideEffects = 1 in
 def CSRRD : FmtCSR<0b0000010000000, (outs GPR:$rd), (ins uimm14:$csr_num),
                    "csrrd", "$rd, $csr_num">;
-let Constraints = "$rd = $dst" in {
+let hasSideEffects = 1, Constraints = "$rd = $dst" in {
 def CSRWR : FmtCSR<0b0000010000001, (outs GPR:$dst),
                    (ins GPR:$rd, uimm14:$csr_num), "csrwr", "$rd, $csr_num">;
 def CSRXCHG : FmtCSRXCHG<0b00000100, (outs GPR:$dst),
                          (ins GPR:$rd, GPR:$rj, uimm14:$csr_num),
                          "csrxchg", "$rd, $rj, $csr_num">;
-} // Constraints = "$rd = $dst"
+} // hasSideEffects = 1, Constraints = "$rd = $dst"
 
 // IOCSR Access Instructions
 def IOCSRRD_B : IOCSRRD<0b0000011001001000000000, "iocsrrd.b">;
@@ -1957,6 +1972,7 @@ def IOCSRWR_D : IOCSRWR<0b0000011001001000000111, "iocsrwr.d">;
 } // Predicates = [IsLA64]
 
 // TLB Maintenance Instructions
+let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in {
 def TLBSRCH  : FmtI32<0b00000110010010000010100000000000, "tlbsrch">;
 def TLBRD    : FmtI32<0b00000110010010000010110000000000, "tlbrd">;
 def TLBWR    : FmtI32<0b00000110010010000011000000000000, "tlbwr">;
@@ -1965,6 +1981,7 @@ def TLBCLR   : FmtI32<0b00000110010010000010000000000000, "tlbclr">;
 def TLBFLUSH : FmtI32<0b00000110010010000010010000000000, "tlbflush">;
 def INVTLB : FmtINVTLB<(outs), (ins GPR:$rk, GPR:$rj, uimm5:$op), "invtlb",
                        "$op, $rj, $rk">;
+} // hasSideEffects = 1, mayLoad = 0, mayStore = 0
 
 // Software Page Walking Instructions
 def LDDIR : Fmt2RI8<0b00000110010000, (outs GPR:$rd),
@@ -1973,6 +1990,7 @@ def LDPTE : FmtLDPTE<(outs), (ins GPR:$rj, uimm8:$seq), "ldpte", "$rj, $seq">;
 
 
 // Other Miscellaneous Instructions
+let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
 def ERTN : FmtI32<0b00000110010010000011100000000000, "ertn">;
 def DBCL : MISC_I15<0b00000000001010101, "dbcl">;
 def IDLE : MISC_I15<0b00000110010010001, "idle">;

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll
index 3d7aa871b9c9ff..55ca08b5ed0e12 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll
@@ -10,11 +10,11 @@ define float @float_fadd_acquire(ptr %p) nounwind {
 ; LA64F-NEXT:    fld.s $fa0, $a0, 0
 ; LA64F-NEXT:    addi.w $a1, $zero, 1
 ; LA64F-NEXT:    movgr2fr.w $fa1, $a1
+; LA64F-NEXT:    ffint.s.w $fa1, $fa1
 ; LA64F-NEXT:  .LBB0_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Loop Header: Depth=1
 ; LA64F-NEXT:    # Child Loop BB0_3 Depth 2
-; LA64F-NEXT:    ffint.s.w $fa2, $fa1
-; LA64F-NEXT:    fadd.s $fa2, $fa0, $fa2
+; LA64F-NEXT:    fadd.s $fa2, $fa0, $fa1
 ; LA64F-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64F-NEXT:    movfr2gr.s $a2, $fa0
 ; LA64F-NEXT:  .LBB0_3: # %atomicrmw.start
@@ -45,11 +45,11 @@ define float @float_fadd_acquire(ptr %p) nounwind {
 ; LA64D-NEXT:    fld.s $fa0, $a0, 0
 ; LA64D-NEXT:    addi.w $a1, $zero, 1
 ; LA64D-NEXT:    movgr2fr.w $fa1, $a1
+; LA64D-NEXT:    ffint.s.w $fa1, $fa1
 ; LA64D-NEXT:  .LBB0_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Loop Header: Depth=1
 ; LA64D-NEXT:    # Child Loop BB0_3 Depth 2
-; LA64D-NEXT:    ffint.s.w $fa2, $fa1
-; LA64D-NEXT:    fadd.s $fa2, $fa0, $fa2
+; LA64D-NEXT:    fadd.s $fa2, $fa0, $fa1
 ; LA64D-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64D-NEXT:    movfr2gr.s $a2, $fa0
 ; LA64D-NEXT:  .LBB0_3: # %atomicrmw.start
@@ -158,12 +158,12 @@ define float @float_fmin_acquire(ptr %p) nounwind {
 ; LA64F-NEXT:    fld.s $fa0, $a0, 0
 ; LA64F-NEXT:    addi.w $a1, $zero, 1
 ; LA64F-NEXT:    movgr2fr.w $fa1, $a1
+; LA64F-NEXT:    ffint.s.w $fa1, $fa1
 ; LA64F-NEXT:  .LBB2_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Loop Header: Depth=1
 ; LA64F-NEXT:    # Child Loop BB2_3 Depth 2
-; LA64F-NEXT:    ffint.s.w $fa2, $fa1
-; LA64F-NEXT:    fmax.s $fa3, $fa0, $fa0
-; LA64F-NEXT:    fmin.s $fa2, $fa3, $fa2
+; LA64F-NEXT:    fmax.s $fa2, $fa0, $fa0
+; LA64F-NEXT:    fmin.s $fa2, $fa2, $fa1
 ; LA64F-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64F-NEXT:    movfr2gr.s $a2, $fa0
 ; LA64F-NEXT:  .LBB2_3: # %atomicrmw.start
@@ -194,12 +194,12 @@ define float @float_fmin_acquire(ptr %p) nounwind {
 ; LA64D-NEXT:    fld.s $fa0, $a0, 0
 ; LA64D-NEXT:    addi.w $a1, $zero, 1
 ; LA64D-NEXT:    movgr2fr.w $fa1, $a1
+; LA64D-NEXT:    ffint.s.w $fa1, $fa1
 ; LA64D-NEXT:  .LBB2_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Loop Header: Depth=1
 ; LA64D-NEXT:    # Child Loop BB2_3 Depth 2
-; LA64D-NEXT:    ffint.s.w $fa2, $fa1
-; LA64D-NEXT:    fmax.s $fa3, $fa0, $fa0
-; LA64D-NEXT:    fmin.s $fa2, $fa3, $fa2
+; LA64D-NEXT:    fmax.s $fa2, $fa0, $fa0
+; LA64D-NEXT:    fmin.s $fa2, $fa2, $fa1
 ; LA64D-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64D-NEXT:    movfr2gr.s $a2, $fa0
 ; LA64D-NEXT:  .LBB2_3: # %atomicrmw.start
@@ -234,12 +234,12 @@ define float @float_fmax_acquire(ptr %p) nounwind {
 ; LA64F-NEXT:    fld.s $fa0, $a0, 0
 ; LA64F-NEXT:    addi.w $a1, $zero, 1
 ; LA64F-NEXT:    movgr2fr.w $fa1, $a1
+; LA64F-NEXT:    ffint.s.w $fa1, $fa1
 ; LA64F-NEXT:  .LBB3_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Loop Header: Depth=1
 ; LA64F-NEXT:    # Child Loop BB3_3 Depth 2
-; LA64F-NEXT:    ffint.s.w $fa2, $fa1
-; LA64F-NEXT:    fmax.s $fa3, $fa0, $fa0
-; LA64F-NEXT:    fmax.s $fa2, $fa3, $fa2
+; LA64F-NEXT:    fmax.s $fa2, $fa0, $fa0
+; LA64F-NEXT:    fmax.s $fa2, $fa2, $fa1
 ; LA64F-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64F-NEXT:    movfr2gr.s $a2, $fa0
 ; LA64F-NEXT:  .LBB3_3: # %atomicrmw.start
@@ -270,12 +270,12 @@ define float @float_fmax_acquire(ptr %p) nounwind {
 ; LA64D-NEXT:    fld.s $fa0, $a0, 0
 ; LA64D-NEXT:    addi.w $a1, $zero, 1
 ; LA64D-NEXT:    movgr2fr.w $fa1, $a1
+; LA64D-NEXT:    ffint.s.w $fa1, $fa1
 ; LA64D-NEXT:  .LBB3_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Loop Header: Depth=1
 ; LA64D-NEXT:    # Child Loop BB3_3 Depth 2
-; LA64D-NEXT:    ffint.s.w $fa2, $fa1
-; LA64D-NEXT:    fmax.s $fa3, $fa0, $fa0
-; LA64D-NEXT:    fmax.s $fa2, $fa3, $fa2
+; LA64D-NEXT:    fmax.s $fa2, $fa0, $fa0
+; LA64D-NEXT:    fmax.s $fa2, $fa2, $fa1
 ; LA64D-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64D-NEXT:    movfr2gr.s $a2, $fa0
 ; LA64D-NEXT:  .LBB3_3: # %atomicrmw.start
@@ -307,43 +307,46 @@ define float @float_fmax_acquire(ptr %p) nounwind {
 define double @double_fadd_acquire(ptr %p) nounwind {
 ; LA64F-LABEL: double_fadd_acquire:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -64
-; LA64F-NEXT:    st.d $ra, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 24 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -80
+; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
 ; LA64F-NEXT:    ld.d $a0, $a0, 0
-; LA64F-NEXT:    ori $s0, $zero, 8
-; LA64F-NEXT:    addi.d $s1, $sp, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 0
-; LA64F-NEXT:    ori $s3, $zero, 2
+; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
+; LA64F-NEXT:    ori $s1, $zero, 8
+; LA64F-NEXT:    addi.d $s2, $sp, 16
+; LA64F-NEXT:    addi.d $s3, $sp, 8
+; LA64F-NEXT:    ori $s4, $zero, 2
 ; LA64F-NEXT:  .LBB4_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    st.d $a0, $sp, 8
-; LA64F-NEXT:    lu52i.d $a1, $zero, 1023
+; LA64F-NEXT:    st.d $a0, $sp, 16
+; LA64F-NEXT:    move $a1, $s0
 ; LA64F-NEXT:    bl %plt(__adddf3)
-; LA64F-NEXT:    st.d $a0, $sp, 0
-; LA64F-NEXT:    move $a0, $s0
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    move $a0, $s1
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s1
-; LA64F-NEXT:    move $a3, $s2
-; LA64F-NEXT:    move $a4, $s3
-; LA64F-NEXT:    move $a5, $s3
+; LA64F-NEXT:    move $a2, $s2
+; LA64F-NEXT:    move $a3, $s3
+; LA64F-NEXT:    move $a4, $s4
+; LA64F-NEXT:    move $a5, $s4
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
 ; LA64F-NEXT:    move $a1, $a0
-; LA64F-NEXT:    ld.d $a0, $sp, 8
+; LA64F-NEXT:    ld.d $a0, $sp, 16
 ; LA64F-NEXT:    beqz $a1, .LBB4_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    ld.d $s3, $sp, 16 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 64
+; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 80
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fadd_acquire:
@@ -359,7 +362,8 @@ define double @double_fadd_acquire(ptr %p) nounwind {
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
-; LA64D-NEXT:    movgr2fr.d $fs0, $a0
+; LA64D-NEXT:    movgr2fr.d $fa1, $a0
+; LA64D-NEXT:    ffint.d.l $fs0, $fa1
 ; LA64D-NEXT:    ori $s0, $zero, 8
 ; LA64D-NEXT:    addi.d $s1, $sp, 16
 ; LA64D-NEXT:    addi.d $s2, $sp, 8
@@ -367,8 +371,7 @@ define double @double_fadd_acquire(ptr %p) nounwind {
 ; LA64D-NEXT:  .LBB4_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
-; LA64D-NEXT:    ffint.d.l $fa1, $fs0
-; LA64D-NEXT:    fadd.d $fa0, $fa0, $fa1
+; LA64D-NEXT:    fadd.d $fa0, $fa0, $fs0
 ; LA64D-NEXT:    fst.d $fa0, $sp, 8
 ; LA64D-NEXT:    move $a0, $s0
 ; LA64D-NEXT:    move $a1, $fp
@@ -396,43 +399,46 @@ define double @double_fadd_acquire(ptr %p) nounwind {
 define double @double_fsub_acquire(ptr %p) nounwind {
 ; LA64F-LABEL: double_fsub_acquire:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -64
-; LA64F-NEXT:    st.d $ra, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 24 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -80
+; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
 ; LA64F-NEXT:    ld.d $a0, $a0, 0
-; LA64F-NEXT:    ori $s0, $zero, 8
-; LA64F-NEXT:    addi.d $s1, $sp, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 0
-; LA64F-NEXT:    ori $s3, $zero, 2
+; LA64F-NEXT:    lu52i.d $s0, $zero, -1025
+; LA64F-NEXT:    ori $s1, $zero, 8
+; LA64F-NEXT:    addi.d $s2, $sp, 16
+; LA64F-NEXT:    addi.d $s3, $sp, 8
+; LA64F-NEXT:    ori $s4, $zero, 2
 ; LA64F-NEXT:  .LBB5_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    st.d $a0, $sp, 8
-; LA64F-NEXT:    lu52i.d $a1, $zero, -1025
+; LA64F-NEXT:    st.d $a0, $sp, 16
+; LA64F-NEXT:    move $a1, $s0
 ; LA64F-NEXT:    bl %plt(__adddf3)
-; LA64F-NEXT:    st.d $a0, $sp, 0
-; LA64F-NEXT:    move $a0, $s0
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    move $a0, $s1
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s1
-; LA64F-NEXT:    move $a3, $s2
-; LA64F-NEXT:    move $a4, $s3
-; LA64F-NEXT:    move $a5, $s3
+; LA64F-NEXT:    move $a2, $s2
+; LA64F-NEXT:    move $a3, $s3
+; LA64F-NEXT:    move $a4, $s4
+; LA64F-NEXT:    move $a5, $s4
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
 ; LA64F-NEXT:    move $a1, $a0
-; LA64F-NEXT:    ld.d $a0, $sp, 8
+; LA64F-NEXT:    ld.d $a0, $sp, 16
 ; LA64F-NEXT:    beqz $a1, .LBB5_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    ld.d $s3, $sp, 16 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 64
+; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 80
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fsub_acquire:
@@ -485,43 +491,46 @@ define double @double_fsub_acquire(ptr %p) nounwind {
 define double @double_fmin_acquire(ptr %p) nounwind {
 ; LA64F-LABEL: double_fmin_acquire:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -64
-; LA64F-NEXT:    st.d $ra, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 24 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -80
+; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
 ; LA64F-NEXT:    ld.d $a0, $a0, 0
-; LA64F-NEXT:    ori $s0, $zero, 8
-; LA64F-NEXT:    addi.d $s1, $sp, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 0
-; LA64F-NEXT:    ori $s3, $zero, 2
+; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
+; LA64F-NEXT:    ori $s1, $zero, 8
+; LA64F-NEXT:    addi.d $s2, $sp, 16
+; LA64F-NEXT:    addi.d $s3, $sp, 8
+; LA64F-NEXT:    ori $s4, $zero, 2
 ; LA64F-NEXT:  .LBB6_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    st.d $a0, $sp, 8
-; LA64F-NEXT:    lu52i.d $a1, $zero, 1023
+; LA64F-NEXT:    st.d $a0, $sp, 16
+; LA64F-NEXT:    move $a1, $s0
 ; LA64F-NEXT:    bl %plt(fmin)
-; LA64F-NEXT:    st.d $a0, $sp, 0
-; LA64F-NEXT:    move $a0, $s0
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    move $a0, $s1
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s1
-; LA64F-NEXT:    move $a3, $s2
-; LA64F-NEXT:    move $a4, $s3
-; LA64F-NEXT:    move $a5, $s3
+; LA64F-NEXT:    move $a2, $s2
+; LA64F-NEXT:    move $a3, $s3
+; LA64F-NEXT:    move $a4, $s4
+; LA64F-NEXT:    move $a5, $s4
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
 ; LA64F-NEXT:    move $a1, $a0
-; LA64F-NEXT:    ld.d $a0, $sp, 8
+; LA64F-NEXT:    ld.d $a0, $sp, 16
 ; LA64F-NEXT:    beqz $a1, .LBB6_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    ld.d $s3, $sp, 16 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 64
+; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 80
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fmin_acquire:
@@ -537,7 +546,8 @@ define double @double_fmin_acquire(ptr %p) nounwind {
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
-; LA64D-NEXT:    movgr2fr.d $fs0, $a0
+; LA64D-NEXT:    movgr2fr.d $fa1, $a0
+; LA64D-NEXT:    ffint.d.l $fs0, $fa1
 ; LA64D-NEXT:    ori $s0, $zero, 8
 ; LA64D-NEXT:    addi.d $s1, $sp, 16
 ; LA64D-NEXT:    addi.d $s2, $sp, 8
@@ -546,8 +556,7 @@ define double @double_fmin_acquire(ptr %p) nounwind {
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
 ; LA64D-NEXT:    fmax.d $fa0, $fa0, $fa0
-; LA64D-NEXT:    ffint.d.l $fa1, $fs0
-; LA64D-NEXT:    fmin.d $fa0, $fa0, $fa1
+; LA64D-NEXT:    fmin.d $fa0, $fa0, $fs0
 ; LA64D-NEXT:    fst.d $fa0, $sp, 8
 ; LA64D-NEXT:    move $a0, $s0
 ; LA64D-NEXT:    move $a1, $fp
@@ -575,43 +584,46 @@ define double @double_fmin_acquire(ptr %p) nounwind {
 define double @double_fmax_acquire(ptr %p) nounwind {
 ; LA64F-LABEL: double_fmax_acquire:
 ; LA64F:       # %bb.0:
-; LA64F-NEXT:    addi.d $sp, $sp, -64
-; LA64F-NEXT:    st.d $ra, $sp, 56 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $fp, $sp, 48 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s0, $sp, 40 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s1, $sp, 32 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s2, $sp, 24 # 8-byte Folded Spill
-; LA64F-NEXT:    st.d $s3, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    addi.d $sp, $sp, -80
+; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
 ; LA64F-NEXT:    move $fp, $a0
 ; LA64F-NEXT:    ld.d $a0, $a0, 0
-; LA64F-NEXT:    ori $s0, $zero, 8
-; LA64F-NEXT:    addi.d $s1, $sp, 8
-; LA64F-NEXT:    addi.d $s2, $sp, 0
-; LA64F-NEXT:    ori $s3, $zero, 2
+; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
+; LA64F-NEXT:    ori $s1, $zero, 8
+; LA64F-NEXT:    addi.d $s2, $sp, 16
+; LA64F-NEXT:    addi.d $s3, $sp, 8
+; LA64F-NEXT:    ori $s4, $zero, 2
 ; LA64F-NEXT:  .LBB7_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64F-NEXT:    st.d $a0, $sp, 8
-; LA64F-NEXT:    lu52i.d $a1, $zero, 1023
+; LA64F-NEXT:    st.d $a0, $sp, 16
+; LA64F-NEXT:    move $a1, $s0
 ; LA64F-NEXT:    bl %plt(fmax)
-; LA64F-NEXT:    st.d $a0, $sp, 0
-; LA64F-NEXT:    move $a0, $s0
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    move $a0, $s1
 ; LA64F-NEXT:    move $a1, $fp
-; LA64F-NEXT:    move $a2, $s1
-; LA64F-NEXT:    move $a3, $s2
-; LA64F-NEXT:    move $a4, $s3
-; LA64F-NEXT:    move $a5, $s3
+; LA64F-NEXT:    move $a2, $s2
+; LA64F-NEXT:    move $a3, $s3
+; LA64F-NEXT:    move $a4, $s4
+; LA64F-NEXT:    move $a5, $s4
 ; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
 ; LA64F-NEXT:    move $a1, $a0
-; LA64F-NEXT:    ld.d $a0, $sp, 8
+; LA64F-NEXT:    ld.d $a0, $sp, 16
 ; LA64F-NEXT:    beqz $a1, .LBB7_1
 ; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64F-NEXT:    ld.d $s3, $sp, 16 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s2, $sp, 24 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s1, $sp, 32 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $s0, $sp, 40 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $fp, $sp, 48 # 8-byte Folded Reload
-; LA64F-NEXT:    ld.d $ra, $sp, 56 # 8-byte Folded Reload
-; LA64F-NEXT:    addi.d $sp, $sp, 64
+; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 80
 ; LA64F-NEXT:    ret
 ;
 ; LA64D-LABEL: double_fmax_acquire:
@@ -627,7 +639,8 @@ define double @double_fmax_acquire(ptr %p) nounwind {
 ; LA64D-NEXT:    move $fp, $a0
 ; LA64D-NEXT:    fld.d $fa0, $a0, 0
 ; LA64D-NEXT:    addi.d $a0, $zero, 1
-; LA64D-NEXT:    movgr2fr.d $fs0, $a0
+; LA64D-NEXT:    movgr2fr.d $fa1, $a0
+; LA64D-NEXT:    ffint.d.l $fs0, $fa1
 ; LA64D-NEXT:    ori $s0, $zero, 8
 ; LA64D-NEXT:    addi.d $s1, $sp, 16
 ; LA64D-NEXT:    addi.d $s2, $sp, 8
@@ -636,8 +649,7 @@ define double @double_fmax_acquire(ptr %p) nounwind {
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
 ; LA64D-NEXT:    fmax.d $fa0, $fa0, $fa0
-; LA64D-NEXT:    ffint.d.l $fa1, $fs0
-; LA64D-NEXT:    fmax.d $fa0, $fa0, $fa1
+; LA64D-NEXT:    fmax.d $fa0, $fa0, $fs0
 ; LA64D-NEXT:    fst.d $fa0, $sp, 8
 ; LA64D-NEXT:    move $a0, $s0
 ; LA64D-NEXT:    move $a1, $fp

diff  --git a/llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-dbl.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-dbl.ll
index 8058f7b0810ce3..3db98d20fbf11d 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-dbl.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-dbl.ll
@@ -268,8 +268,6 @@ define i1 @fcmp_fast_olt(double %a, double %b, i1 %c) nounwind {
 ; LA32-NEXT:  # %bb.1: # %if.then
 ; LA32-NEXT:    ret
 ; LA32-NEXT:  .LBB16_2: # %if.else
-; LA32-NEXT:    movgr2fr.w $fa1, $zero
-; LA32-NEXT:    movgr2frh.w $fa1, $zero
 ; LA32-NEXT:    fcmp.clt.d $fcc0, $fa0, $fa1
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
 ; LA32-NEXT:    ret
@@ -308,9 +306,6 @@ define i1 @fcmp_fast_oeq(double %a, double %b, i1 %c) nounwind {
 ; LA32-NEXT:  # %bb.1: # %if.then
 ; LA32-NEXT:    ret
 ; LA32-NEXT:  .LBB17_2: # %if.else
-; LA32-NEXT:    movgr2fr.w $fa1, $zero
-; LA32-NEXT:    movgr2frh.w $fa1, $zero
-; LA32-NEXT:    fcmp.ceq.d $fcc0, $fa0, $fa1
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
 ; LA32-NEXT:    ret
 ;
@@ -346,8 +341,6 @@ define i1 @fcmp_fast_ole(double %a, double %b, i1 %c) nounwind {
 ; LA32-NEXT:  # %bb.1: # %if.then
 ; LA32-NEXT:    ret
 ; LA32-NEXT:  .LBB18_2: # %if.else
-; LA32-NEXT:    movgr2fr.w $fa1, $zero
-; LA32-NEXT:    movgr2frh.w $fa1, $zero
 ; LA32-NEXT:    fcmp.cle.d $fcc0, $fa0, $fa1
 ; LA32-NEXT:    movcf2gr $a0, $fcc0
 ; LA32-NEXT:    ret


        


More information about the llvm-commits mailing list