[llvm] 37b93f0 - [LoongArch] Add some atomic tests (#68766)

via llvm-commits llvm-commits at lists.llvm.org
Wed Oct 11 03:28:09 PDT 2023


Author: hev
Date: 2023-10-11T18:28:04+08:00
New Revision: 37b93f07cd7ba2b1e6e81116cd49d34396b7b70a

URL: https://github.com/llvm/llvm-project/commit/37b93f07cd7ba2b1e6e81116cd49d34396b7b70a
DIFF: https://github.com/llvm/llvm-project/commit/37b93f07cd7ba2b1e6e81116cd49d34396b7b70a.diff

LOG: [LoongArch] Add some atomic tests (#68766)

Added: 
    llvm/test/CodeGen/LoongArch/ir-instruction/fence-singlethread.ll

Modified: 
    llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-minmax.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll

Removed: 
    


################################################################################
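Note: in atomicrmw-fp.ll the new cases exercise the release, acq_rel and seq_cst orderings for floating-point atomicrmw (the pre-existing cases in this file only covered acquire, per the TODO comment removed below), and a new fence-singlethread.ll test is added. A representative added case, taken directly from the diff below, is the 32-bit fadd at release ordering:

    define float @float_fadd_release(ptr %p) nounwind {
      %v = atomicrmw fadd ptr %p, float 1.0 release, align 4
      ret float %v
    }

As the LA64F and LA64D CHECK lines record, this currently expands to an ll.w/sc.w compare-and-exchange loop with a dbar on the failure path; the double-precision variants without native support instead call __atomic_compare_exchange.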
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll
index 9a29d67e998276f..02d481cb3865a77 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll
@@ -2,8 +2,6 @@
 ; RUN: llc --mtriple=loongarch64 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA64F
 ; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64D
 
-;; Add more test cases after supporting different AtomicOrdering.
-
 define float @float_fadd_acquire(ptr %p) nounwind {
 ; LA64F-LABEL: float_fadd_acquire:
 ; LA64F:       # %bb.0:
@@ -681,3 +679,2715 @@ define double @double_fmax_acquire(ptr %p) nounwind {
   %v = atomicrmw fmax ptr %p, double 1.0 acquire, align 4
   ret double %v
 }
+
+define float @float_fadd_release(ptr %p) nounwind {
+; LA64F-LABEL: float_fadd_release:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    fld.s $fa0, $a0, 0
+; LA64F-NEXT:    addi.w $a1, $zero, 1
+; LA64F-NEXT:    movgr2fr.w $fa1, $a1
+; LA64F-NEXT:    ffint.s.w $fa1, $fa1
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB8_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Loop Header: Depth=1
+; LA64F-NEXT:    # Child Loop BB8_3 Depth 2
+; LA64F-NEXT:    fadd.s $fa2, $fa0, $fa1
+; LA64F-NEXT:    movfr2gr.s $a1, $fa2
+; LA64F-NEXT:    movfr2gr.s $a2, $fa0
+; LA64F-NEXT:  .LBB8_3: # %atomicrmw.start
+; LA64F-NEXT:    # Parent Loop BB8_1 Depth=1
+; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64F-NEXT:    ll.w $a3, $a0, 0
+; LA64F-NEXT:    bne $a3, $a2, .LBB8_5
+; LA64F-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB8_3 Depth=2
+; LA64F-NEXT:    move $a4, $a1
+; LA64F-NEXT:    sc.w $a4, $a0, 0
+; LA64F-NEXT:    beqz $a4, .LBB8_3
+; LA64F-NEXT:    b .LBB8_6
+; LA64F-NEXT:  .LBB8_5: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB8_1 Depth=1
+; LA64F-NEXT:    dbar 1792
+; LA64F-NEXT:  .LBB8_6: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB8_1 Depth=1
+; LA64F-NEXT:    movgr2fr.w $fa0, $a3
+; LA64F-NEXT:    addi.w $a1, $a2, 0
+; LA64F-NEXT:    bne $a3, $a1, .LBB8_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: float_fadd_release:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    fld.s $fa0, $a0, 0
+; LA64D-NEXT:    addi.w $a1, $zero, 1
+; LA64D-NEXT:    movgr2fr.w $fa1, $a1
+; LA64D-NEXT:    ffint.s.w $fa1, $fa1
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB8_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Loop Header: Depth=1
+; LA64D-NEXT:    # Child Loop BB8_3 Depth 2
+; LA64D-NEXT:    fadd.s $fa2, $fa0, $fa1
+; LA64D-NEXT:    movfr2gr.s $a1, $fa2
+; LA64D-NEXT:    movfr2gr.s $a2, $fa0
+; LA64D-NEXT:  .LBB8_3: # %atomicrmw.start
+; LA64D-NEXT:    # Parent Loop BB8_1 Depth=1
+; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64D-NEXT:    ll.w $a3, $a0, 0
+; LA64D-NEXT:    bne $a3, $a2, .LBB8_5
+; LA64D-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB8_3 Depth=2
+; LA64D-NEXT:    move $a4, $a1
+; LA64D-NEXT:    sc.w $a4, $a0, 0
+; LA64D-NEXT:    beqz $a4, .LBB8_3
+; LA64D-NEXT:    b .LBB8_6
+; LA64D-NEXT:  .LBB8_5: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB8_1 Depth=1
+; LA64D-NEXT:    dbar 1792
+; LA64D-NEXT:  .LBB8_6: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB8_1 Depth=1
+; LA64D-NEXT:    movgr2fr.w $fa0, $a3
+; LA64D-NEXT:    addi.w $a1, $a2, 0
+; LA64D-NEXT:    bne $a3, $a1, .LBB8_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    ret
+  %v = atomicrmw fadd ptr %p, float 1.0 release, align 4
+  ret float %v
+}
+
+define float @float_fsub_release(ptr %p) nounwind {
+; LA64F-LABEL: float_fsub_release:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    fld.s $fa0, $a0, 0
+; LA64F-NEXT:    pcalau12i $a1, %pc_hi20(.LCPI9_0)
+; LA64F-NEXT:    addi.d $a1, $a1, %pc_lo12(.LCPI9_0)
+; LA64F-NEXT:    fld.s $fa1, $a1, 0
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB9_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Loop Header: Depth=1
+; LA64F-NEXT:    # Child Loop BB9_3 Depth 2
+; LA64F-NEXT:    fadd.s $fa2, $fa0, $fa1
+; LA64F-NEXT:    movfr2gr.s $a1, $fa2
+; LA64F-NEXT:    movfr2gr.s $a2, $fa0
+; LA64F-NEXT:  .LBB9_3: # %atomicrmw.start
+; LA64F-NEXT:    # Parent Loop BB9_1 Depth=1
+; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64F-NEXT:    ll.w $a3, $a0, 0
+; LA64F-NEXT:    bne $a3, $a2, .LBB9_5
+; LA64F-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB9_3 Depth=2
+; LA64F-NEXT:    move $a4, $a1
+; LA64F-NEXT:    sc.w $a4, $a0, 0
+; LA64F-NEXT:    beqz $a4, .LBB9_3
+; LA64F-NEXT:    b .LBB9_6
+; LA64F-NEXT:  .LBB9_5: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB9_1 Depth=1
+; LA64F-NEXT:    dbar 1792
+; LA64F-NEXT:  .LBB9_6: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB9_1 Depth=1
+; LA64F-NEXT:    movgr2fr.w $fa0, $a3
+; LA64F-NEXT:    addi.w $a1, $a2, 0
+; LA64F-NEXT:    bne $a3, $a1, .LBB9_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: float_fsub_release:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    fld.s $fa0, $a0, 0
+; LA64D-NEXT:    pcalau12i $a1, %pc_hi20(.LCPI9_0)
+; LA64D-NEXT:    addi.d $a1, $a1, %pc_lo12(.LCPI9_0)
+; LA64D-NEXT:    fld.s $fa1, $a1, 0
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB9_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Loop Header: Depth=1
+; LA64D-NEXT:    # Child Loop BB9_3 Depth 2
+; LA64D-NEXT:    fadd.s $fa2, $fa0, $fa1
+; LA64D-NEXT:    movfr2gr.s $a1, $fa2
+; LA64D-NEXT:    movfr2gr.s $a2, $fa0
+; LA64D-NEXT:  .LBB9_3: # %atomicrmw.start
+; LA64D-NEXT:    # Parent Loop BB9_1 Depth=1
+; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64D-NEXT:    ll.w $a3, $a0, 0
+; LA64D-NEXT:    bne $a3, $a2, .LBB9_5
+; LA64D-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB9_3 Depth=2
+; LA64D-NEXT:    move $a4, $a1
+; LA64D-NEXT:    sc.w $a4, $a0, 0
+; LA64D-NEXT:    beqz $a4, .LBB9_3
+; LA64D-NEXT:    b .LBB9_6
+; LA64D-NEXT:  .LBB9_5: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB9_1 Depth=1
+; LA64D-NEXT:    dbar 1792
+; LA64D-NEXT:  .LBB9_6: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB9_1 Depth=1
+; LA64D-NEXT:    movgr2fr.w $fa0, $a3
+; LA64D-NEXT:    addi.w $a1, $a2, 0
+; LA64D-NEXT:    bne $a3, $a1, .LBB9_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    ret
+  %v = atomicrmw fsub ptr %p, float 1.0 release, align 4
+  ret float %v
+}
+
+define float @float_fmin_release(ptr %p) nounwind {
+; LA64F-LABEL: float_fmin_release:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    fld.s $fa0, $a0, 0
+; LA64F-NEXT:    addi.w $a1, $zero, 1
+; LA64F-NEXT:    movgr2fr.w $fa1, $a1
+; LA64F-NEXT:    ffint.s.w $fa1, $fa1
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB10_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Loop Header: Depth=1
+; LA64F-NEXT:    # Child Loop BB10_3 Depth 2
+; LA64F-NEXT:    fmax.s $fa2, $fa0, $fa0
+; LA64F-NEXT:    fmin.s $fa2, $fa2, $fa1
+; LA64F-NEXT:    movfr2gr.s $a1, $fa2
+; LA64F-NEXT:    movfr2gr.s $a2, $fa0
+; LA64F-NEXT:  .LBB10_3: # %atomicrmw.start
+; LA64F-NEXT:    # Parent Loop BB10_1 Depth=1
+; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64F-NEXT:    ll.w $a3, $a0, 0
+; LA64F-NEXT:    bne $a3, $a2, .LBB10_5
+; LA64F-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB10_3 Depth=2
+; LA64F-NEXT:    move $a4, $a1
+; LA64F-NEXT:    sc.w $a4, $a0, 0
+; LA64F-NEXT:    beqz $a4, .LBB10_3
+; LA64F-NEXT:    b .LBB10_6
+; LA64F-NEXT:  .LBB10_5: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB10_1 Depth=1
+; LA64F-NEXT:    dbar 1792
+; LA64F-NEXT:  .LBB10_6: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB10_1 Depth=1
+; LA64F-NEXT:    movgr2fr.w $fa0, $a3
+; LA64F-NEXT:    addi.w $a1, $a2, 0
+; LA64F-NEXT:    bne $a3, $a1, .LBB10_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: float_fmin_release:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    fld.s $fa0, $a0, 0
+; LA64D-NEXT:    addi.w $a1, $zero, 1
+; LA64D-NEXT:    movgr2fr.w $fa1, $a1
+; LA64D-NEXT:    ffint.s.w $fa1, $fa1
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB10_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Loop Header: Depth=1
+; LA64D-NEXT:    # Child Loop BB10_3 Depth 2
+; LA64D-NEXT:    fmax.s $fa2, $fa0, $fa0
+; LA64D-NEXT:    fmin.s $fa2, $fa2, $fa1
+; LA64D-NEXT:    movfr2gr.s $a1, $fa2
+; LA64D-NEXT:    movfr2gr.s $a2, $fa0
+; LA64D-NEXT:  .LBB10_3: # %atomicrmw.start
+; LA64D-NEXT:    # Parent Loop BB10_1 Depth=1
+; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64D-NEXT:    ll.w $a3, $a0, 0
+; LA64D-NEXT:    bne $a3, $a2, .LBB10_5
+; LA64D-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB10_3 Depth=2
+; LA64D-NEXT:    move $a4, $a1
+; LA64D-NEXT:    sc.w $a4, $a0, 0
+; LA64D-NEXT:    beqz $a4, .LBB10_3
+; LA64D-NEXT:    b .LBB10_6
+; LA64D-NEXT:  .LBB10_5: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB10_1 Depth=1
+; LA64D-NEXT:    dbar 1792
+; LA64D-NEXT:  .LBB10_6: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB10_1 Depth=1
+; LA64D-NEXT:    movgr2fr.w $fa0, $a3
+; LA64D-NEXT:    addi.w $a1, $a2, 0
+; LA64D-NEXT:    bne $a3, $a1, .LBB10_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    ret
+  %v = atomicrmw fmin ptr %p, float 1.0 release, align 4
+  ret float %v
+}
+
+define float @float_fmax_release(ptr %p) nounwind {
+; LA64F-LABEL: float_fmax_release:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    fld.s $fa0, $a0, 0
+; LA64F-NEXT:    addi.w $a1, $zero, 1
+; LA64F-NEXT:    movgr2fr.w $fa1, $a1
+; LA64F-NEXT:    ffint.s.w $fa1, $fa1
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB11_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Loop Header: Depth=1
+; LA64F-NEXT:    # Child Loop BB11_3 Depth 2
+; LA64F-NEXT:    fmax.s $fa2, $fa0, $fa0
+; LA64F-NEXT:    fmax.s $fa2, $fa2, $fa1
+; LA64F-NEXT:    movfr2gr.s $a1, $fa2
+; LA64F-NEXT:    movfr2gr.s $a2, $fa0
+; LA64F-NEXT:  .LBB11_3: # %atomicrmw.start
+; LA64F-NEXT:    # Parent Loop BB11_1 Depth=1
+; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64F-NEXT:    ll.w $a3, $a0, 0
+; LA64F-NEXT:    bne $a3, $a2, .LBB11_5
+; LA64F-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB11_3 Depth=2
+; LA64F-NEXT:    move $a4, $a1
+; LA64F-NEXT:    sc.w $a4, $a0, 0
+; LA64F-NEXT:    beqz $a4, .LBB11_3
+; LA64F-NEXT:    b .LBB11_6
+; LA64F-NEXT:  .LBB11_5: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB11_1 Depth=1
+; LA64F-NEXT:    dbar 1792
+; LA64F-NEXT:  .LBB11_6: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB11_1 Depth=1
+; LA64F-NEXT:    movgr2fr.w $fa0, $a3
+; LA64F-NEXT:    addi.w $a1, $a2, 0
+; LA64F-NEXT:    bne $a3, $a1, .LBB11_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: float_fmax_release:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    fld.s $fa0, $a0, 0
+; LA64D-NEXT:    addi.w $a1, $zero, 1
+; LA64D-NEXT:    movgr2fr.w $fa1, $a1
+; LA64D-NEXT:    ffint.s.w $fa1, $fa1
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB11_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Loop Header: Depth=1
+; LA64D-NEXT:    # Child Loop BB11_3 Depth 2
+; LA64D-NEXT:    fmax.s $fa2, $fa0, $fa0
+; LA64D-NEXT:    fmax.s $fa2, $fa2, $fa1
+; LA64D-NEXT:    movfr2gr.s $a1, $fa2
+; LA64D-NEXT:    movfr2gr.s $a2, $fa0
+; LA64D-NEXT:  .LBB11_3: # %atomicrmw.start
+; LA64D-NEXT:    # Parent Loop BB11_1 Depth=1
+; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64D-NEXT:    ll.w $a3, $a0, 0
+; LA64D-NEXT:    bne $a3, $a2, .LBB11_5
+; LA64D-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB11_3 Depth=2
+; LA64D-NEXT:    move $a4, $a1
+; LA64D-NEXT:    sc.w $a4, $a0, 0
+; LA64D-NEXT:    beqz $a4, .LBB11_3
+; LA64D-NEXT:    b .LBB11_6
+; LA64D-NEXT:  .LBB11_5: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB11_1 Depth=1
+; LA64D-NEXT:    dbar 1792
+; LA64D-NEXT:  .LBB11_6: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB11_1 Depth=1
+; LA64D-NEXT:    movgr2fr.w $fa0, $a3
+; LA64D-NEXT:    addi.w $a1, $a2, 0
+; LA64D-NEXT:    bne $a3, $a1, .LBB11_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    ret
+  %v = atomicrmw fmax ptr %p, float 1.0 release, align 4
+  ret float %v
+}
+
+define double @double_fadd_release(ptr %p) nounwind {
+; LA64F-LABEL: double_fadd_release:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    addi.d $sp, $sp, -80
+; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    move $fp, $a0
+; LA64F-NEXT:    ld.d $a0, $a0, 0
+; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
+; LA64F-NEXT:    ori $s1, $zero, 8
+; LA64F-NEXT:    addi.d $s2, $sp, 16
+; LA64F-NEXT:    addi.d $s3, $sp, 8
+; LA64F-NEXT:    ori $s4, $zero, 3
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB12_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64F-NEXT:    st.d $a0, $sp, 16
+; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    bl %plt(__adddf3)
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    move $a1, $fp
+; LA64F-NEXT:    move $a2, $s2
+; LA64F-NEXT:    move $a3, $s3
+; LA64F-NEXT:    move $a4, $s4
+; LA64F-NEXT:    move $a5, $zero
+; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64F-NEXT:    move $a1, $a0
+; LA64F-NEXT:    ld.d $a0, $sp, 16
+; LA64F-NEXT:    beqz $a1, .LBB12_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: double_fadd_release:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    addi.d $sp, $sp, -80
+; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
+; LA64D-NEXT:    move $fp, $a0
+; LA64D-NEXT:    fld.d $fa0, $a0, 0
+; LA64D-NEXT:    addi.d $a0, $zero, 1
+; LA64D-NEXT:    movgr2fr.d $fa1, $a0
+; LA64D-NEXT:    ffint.d.l $fs0, $fa1
+; LA64D-NEXT:    ori $s0, $zero, 8
+; LA64D-NEXT:    addi.d $s1, $sp, 16
+; LA64D-NEXT:    addi.d $s2, $sp, 8
+; LA64D-NEXT:    ori $s3, $zero, 3
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB12_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64D-NEXT:    fst.d $fa0, $sp, 16
+; LA64D-NEXT:    fadd.d $fa0, $fa0, $fs0
+; LA64D-NEXT:    fst.d $fa0, $sp, 8
+; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    move $a1, $fp
+; LA64D-NEXT:    move $a2, $s1
+; LA64D-NEXT:    move $a3, $s2
+; LA64D-NEXT:    move $a4, $s3
+; LA64D-NEXT:    move $a5, $zero
+; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64D-NEXT:    fld.d $fa0, $sp, 16
+; LA64D-NEXT:    beqz $a0, .LBB12_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ret
+  %v = atomicrmw fadd ptr %p, double 1.0 release, align 4
+  ret double %v
+}
+
+define double @double_fsub_release(ptr %p) nounwind {
+; LA64F-LABEL: double_fsub_release:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    addi.d $sp, $sp, -80
+; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    move $fp, $a0
+; LA64F-NEXT:    ld.d $a0, $a0, 0
+; LA64F-NEXT:    lu52i.d $s0, $zero, -1025
+; LA64F-NEXT:    ori $s1, $zero, 8
+; LA64F-NEXT:    addi.d $s2, $sp, 16
+; LA64F-NEXT:    addi.d $s3, $sp, 8
+; LA64F-NEXT:    ori $s4, $zero, 3
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB13_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64F-NEXT:    st.d $a0, $sp, 16
+; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    bl %plt(__adddf3)
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    move $a1, $fp
+; LA64F-NEXT:    move $a2, $s2
+; LA64F-NEXT:    move $a3, $s3
+; LA64F-NEXT:    move $a4, $s4
+; LA64F-NEXT:    move $a5, $zero
+; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64F-NEXT:    move $a1, $a0
+; LA64F-NEXT:    ld.d $a0, $sp, 16
+; LA64F-NEXT:    beqz $a1, .LBB13_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: double_fsub_release:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    addi.d $sp, $sp, -80
+; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
+; LA64D-NEXT:    move $fp, $a0
+; LA64D-NEXT:    fld.d $fa0, $a0, 0
+; LA64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI13_0)
+; LA64D-NEXT:    addi.d $a0, $a0, %pc_lo12(.LCPI13_0)
+; LA64D-NEXT:    fld.d $fs0, $a0, 0
+; LA64D-NEXT:    ori $s0, $zero, 8
+; LA64D-NEXT:    addi.d $s1, $sp, 16
+; LA64D-NEXT:    addi.d $s2, $sp, 8
+; LA64D-NEXT:    ori $s3, $zero, 3
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB13_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64D-NEXT:    fst.d $fa0, $sp, 16
+; LA64D-NEXT:    fadd.d $fa0, $fa0, $fs0
+; LA64D-NEXT:    fst.d $fa0, $sp, 8
+; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    move $a1, $fp
+; LA64D-NEXT:    move $a2, $s1
+; LA64D-NEXT:    move $a3, $s2
+; LA64D-NEXT:    move $a4, $s3
+; LA64D-NEXT:    move $a5, $zero
+; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64D-NEXT:    fld.d $fa0, $sp, 16
+; LA64D-NEXT:    beqz $a0, .LBB13_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ret
+  %v = atomicrmw fsub ptr %p, double 1.0 release, align 4
+  ret double %v
+}
+
+define double @double_fmin_release(ptr %p) nounwind {
+; LA64F-LABEL: double_fmin_release:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    addi.d $sp, $sp, -80
+; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    move $fp, $a0
+; LA64F-NEXT:    ld.d $a0, $a0, 0
+; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
+; LA64F-NEXT:    ori $s1, $zero, 8
+; LA64F-NEXT:    addi.d $s2, $sp, 16
+; LA64F-NEXT:    addi.d $s3, $sp, 8
+; LA64F-NEXT:    ori $s4, $zero, 3
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB14_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64F-NEXT:    st.d $a0, $sp, 16
+; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    bl %plt(fmin)
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    move $a1, $fp
+; LA64F-NEXT:    move $a2, $s2
+; LA64F-NEXT:    move $a3, $s3
+; LA64F-NEXT:    move $a4, $s4
+; LA64F-NEXT:    move $a5, $zero
+; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64F-NEXT:    move $a1, $a0
+; LA64F-NEXT:    ld.d $a0, $sp, 16
+; LA64F-NEXT:    beqz $a1, .LBB14_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: double_fmin_release:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    addi.d $sp, $sp, -80
+; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
+; LA64D-NEXT:    move $fp, $a0
+; LA64D-NEXT:    fld.d $fa0, $a0, 0
+; LA64D-NEXT:    addi.d $a0, $zero, 1
+; LA64D-NEXT:    movgr2fr.d $fa1, $a0
+; LA64D-NEXT:    ffint.d.l $fs0, $fa1
+; LA64D-NEXT:    ori $s0, $zero, 8
+; LA64D-NEXT:    addi.d $s1, $sp, 16
+; LA64D-NEXT:    addi.d $s2, $sp, 8
+; LA64D-NEXT:    ori $s3, $zero, 3
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB14_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64D-NEXT:    fst.d $fa0, $sp, 16
+; LA64D-NEXT:    fmax.d $fa0, $fa0, $fa0
+; LA64D-NEXT:    fmin.d $fa0, $fa0, $fs0
+; LA64D-NEXT:    fst.d $fa0, $sp, 8
+; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    move $a1, $fp
+; LA64D-NEXT:    move $a2, $s1
+; LA64D-NEXT:    move $a3, $s2
+; LA64D-NEXT:    move $a4, $s3
+; LA64D-NEXT:    move $a5, $zero
+; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64D-NEXT:    fld.d $fa0, $sp, 16
+; LA64D-NEXT:    beqz $a0, .LBB14_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ret
+  %v = atomicrmw fmin ptr %p, double 1.0 release, align 4
+  ret double %v
+}
+
+define double @double_fmax_release(ptr %p) nounwind {
+; LA64F-LABEL: double_fmax_release:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    addi.d $sp, $sp, -80
+; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    move $fp, $a0
+; LA64F-NEXT:    ld.d $a0, $a0, 0
+; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
+; LA64F-NEXT:    ori $s1, $zero, 8
+; LA64F-NEXT:    addi.d $s2, $sp, 16
+; LA64F-NEXT:    addi.d $s3, $sp, 8
+; LA64F-NEXT:    ori $s4, $zero, 3
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB15_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64F-NEXT:    st.d $a0, $sp, 16
+; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    bl %plt(fmax)
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    move $a1, $fp
+; LA64F-NEXT:    move $a2, $s2
+; LA64F-NEXT:    move $a3, $s3
+; LA64F-NEXT:    move $a4, $s4
+; LA64F-NEXT:    move $a5, $zero
+; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64F-NEXT:    move $a1, $a0
+; LA64F-NEXT:    ld.d $a0, $sp, 16
+; LA64F-NEXT:    beqz $a1, .LBB15_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: double_fmax_release:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    addi.d $sp, $sp, -80
+; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
+; LA64D-NEXT:    move $fp, $a0
+; LA64D-NEXT:    fld.d $fa0, $a0, 0
+; LA64D-NEXT:    addi.d $a0, $zero, 1
+; LA64D-NEXT:    movgr2fr.d $fa1, $a0
+; LA64D-NEXT:    ffint.d.l $fs0, $fa1
+; LA64D-NEXT:    ori $s0, $zero, 8
+; LA64D-NEXT:    addi.d $s1, $sp, 16
+; LA64D-NEXT:    addi.d $s2, $sp, 8
+; LA64D-NEXT:    ori $s3, $zero, 3
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB15_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64D-NEXT:    fst.d $fa0, $sp, 16
+; LA64D-NEXT:    fmax.d $fa0, $fa0, $fa0
+; LA64D-NEXT:    fmax.d $fa0, $fa0, $fs0
+; LA64D-NEXT:    fst.d $fa0, $sp, 8
+; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    move $a1, $fp
+; LA64D-NEXT:    move $a2, $s1
+; LA64D-NEXT:    move $a3, $s2
+; LA64D-NEXT:    move $a4, $s3
+; LA64D-NEXT:    move $a5, $zero
+; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64D-NEXT:    fld.d $fa0, $sp, 16
+; LA64D-NEXT:    beqz $a0, .LBB15_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ret
+  %v = atomicrmw fmax ptr %p, double 1.0 release, align 4
+  ret double %v
+}
+
+define float @float_fadd_acq_rel(ptr %p) nounwind {
+; LA64F-LABEL: float_fadd_acq_rel:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    fld.s $fa0, $a0, 0
+; LA64F-NEXT:    addi.w $a1, $zero, 1
+; LA64F-NEXT:    movgr2fr.w $fa1, $a1
+; LA64F-NEXT:    ffint.s.w $fa1, $fa1
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB16_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Loop Header: Depth=1
+; LA64F-NEXT:    # Child Loop BB16_3 Depth 2
+; LA64F-NEXT:    fadd.s $fa2, $fa0, $fa1
+; LA64F-NEXT:    movfr2gr.s $a1, $fa2
+; LA64F-NEXT:    movfr2gr.s $a2, $fa0
+; LA64F-NEXT:  .LBB16_3: # %atomicrmw.start
+; LA64F-NEXT:    # Parent Loop BB16_1 Depth=1
+; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64F-NEXT:    ll.w $a3, $a0, 0
+; LA64F-NEXT:    bne $a3, $a2, .LBB16_5
+; LA64F-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB16_3 Depth=2
+; LA64F-NEXT:    move $a4, $a1
+; LA64F-NEXT:    sc.w $a4, $a0, 0
+; LA64F-NEXT:    beqz $a4, .LBB16_3
+; LA64F-NEXT:    b .LBB16_6
+; LA64F-NEXT:  .LBB16_5: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB16_1 Depth=1
+; LA64F-NEXT:    dbar 0
+; LA64F-NEXT:  .LBB16_6: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB16_1 Depth=1
+; LA64F-NEXT:    movgr2fr.w $fa0, $a3
+; LA64F-NEXT:    addi.w $a1, $a2, 0
+; LA64F-NEXT:    bne $a3, $a1, .LBB16_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: float_fadd_acq_rel:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    fld.s $fa0, $a0, 0
+; LA64D-NEXT:    addi.w $a1, $zero, 1
+; LA64D-NEXT:    movgr2fr.w $fa1, $a1
+; LA64D-NEXT:    ffint.s.w $fa1, $fa1
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB16_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Loop Header: Depth=1
+; LA64D-NEXT:    # Child Loop BB16_3 Depth 2
+; LA64D-NEXT:    fadd.s $fa2, $fa0, $fa1
+; LA64D-NEXT:    movfr2gr.s $a1, $fa2
+; LA64D-NEXT:    movfr2gr.s $a2, $fa0
+; LA64D-NEXT:  .LBB16_3: # %atomicrmw.start
+; LA64D-NEXT:    # Parent Loop BB16_1 Depth=1
+; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64D-NEXT:    ll.w $a3, $a0, 0
+; LA64D-NEXT:    bne $a3, $a2, .LBB16_5
+; LA64D-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB16_3 Depth=2
+; LA64D-NEXT:    move $a4, $a1
+; LA64D-NEXT:    sc.w $a4, $a0, 0
+; LA64D-NEXT:    beqz $a4, .LBB16_3
+; LA64D-NEXT:    b .LBB16_6
+; LA64D-NEXT:  .LBB16_5: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB16_1 Depth=1
+; LA64D-NEXT:    dbar 0
+; LA64D-NEXT:  .LBB16_6: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB16_1 Depth=1
+; LA64D-NEXT:    movgr2fr.w $fa0, $a3
+; LA64D-NEXT:    addi.w $a1, $a2, 0
+; LA64D-NEXT:    bne $a3, $a1, .LBB16_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    ret
+  %v = atomicrmw fadd ptr %p, float 1.0 acq_rel, align 4
+  ret float %v
+}
+
+define float @float_fsub_acq_rel(ptr %p) nounwind {
+; LA64F-LABEL: float_fsub_acq_rel:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    fld.s $fa0, $a0, 0
+; LA64F-NEXT:    pcalau12i $a1, %pc_hi20(.LCPI17_0)
+; LA64F-NEXT:    addi.d $a1, $a1, %pc_lo12(.LCPI17_0)
+; LA64F-NEXT:    fld.s $fa1, $a1, 0
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB17_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Loop Header: Depth=1
+; LA64F-NEXT:    # Child Loop BB17_3 Depth 2
+; LA64F-NEXT:    fadd.s $fa2, $fa0, $fa1
+; LA64F-NEXT:    movfr2gr.s $a1, $fa2
+; LA64F-NEXT:    movfr2gr.s $a2, $fa0
+; LA64F-NEXT:  .LBB17_3: # %atomicrmw.start
+; LA64F-NEXT:    # Parent Loop BB17_1 Depth=1
+; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64F-NEXT:    ll.w $a3, $a0, 0
+; LA64F-NEXT:    bne $a3, $a2, .LBB17_5
+; LA64F-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB17_3 Depth=2
+; LA64F-NEXT:    move $a4, $a1
+; LA64F-NEXT:    sc.w $a4, $a0, 0
+; LA64F-NEXT:    beqz $a4, .LBB17_3
+; LA64F-NEXT:    b .LBB17_6
+; LA64F-NEXT:  .LBB17_5: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB17_1 Depth=1
+; LA64F-NEXT:    dbar 0
+; LA64F-NEXT:  .LBB17_6: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB17_1 Depth=1
+; LA64F-NEXT:    movgr2fr.w $fa0, $a3
+; LA64F-NEXT:    addi.w $a1, $a2, 0
+; LA64F-NEXT:    bne $a3, $a1, .LBB17_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: float_fsub_acq_rel:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    fld.s $fa0, $a0, 0
+; LA64D-NEXT:    pcalau12i $a1, %pc_hi20(.LCPI17_0)
+; LA64D-NEXT:    addi.d $a1, $a1, %pc_lo12(.LCPI17_0)
+; LA64D-NEXT:    fld.s $fa1, $a1, 0
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB17_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Loop Header: Depth=1
+; LA64D-NEXT:    # Child Loop BB17_3 Depth 2
+; LA64D-NEXT:    fadd.s $fa2, $fa0, $fa1
+; LA64D-NEXT:    movfr2gr.s $a1, $fa2
+; LA64D-NEXT:    movfr2gr.s $a2, $fa0
+; LA64D-NEXT:  .LBB17_3: # %atomicrmw.start
+; LA64D-NEXT:    # Parent Loop BB17_1 Depth=1
+; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64D-NEXT:    ll.w $a3, $a0, 0
+; LA64D-NEXT:    bne $a3, $a2, .LBB17_5
+; LA64D-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB17_3 Depth=2
+; LA64D-NEXT:    move $a4, $a1
+; LA64D-NEXT:    sc.w $a4, $a0, 0
+; LA64D-NEXT:    beqz $a4, .LBB17_3
+; LA64D-NEXT:    b .LBB17_6
+; LA64D-NEXT:  .LBB17_5: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB17_1 Depth=1
+; LA64D-NEXT:    dbar 0
+; LA64D-NEXT:  .LBB17_6: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB17_1 Depth=1
+; LA64D-NEXT:    movgr2fr.w $fa0, $a3
+; LA64D-NEXT:    addi.w $a1, $a2, 0
+; LA64D-NEXT:    bne $a3, $a1, .LBB17_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    ret
+  %v = atomicrmw fsub ptr %p, float 1.0 acq_rel, align 4
+  ret float %v
+}
+
+define float @float_fmin_acq_rel(ptr %p) nounwind {
+; LA64F-LABEL: float_fmin_acq_rel:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    fld.s $fa0, $a0, 0
+; LA64F-NEXT:    addi.w $a1, $zero, 1
+; LA64F-NEXT:    movgr2fr.w $fa1, $a1
+; LA64F-NEXT:    ffint.s.w $fa1, $fa1
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB18_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Loop Header: Depth=1
+; LA64F-NEXT:    # Child Loop BB18_3 Depth 2
+; LA64F-NEXT:    fmax.s $fa2, $fa0, $fa0
+; LA64F-NEXT:    fmin.s $fa2, $fa2, $fa1
+; LA64F-NEXT:    movfr2gr.s $a1, $fa2
+; LA64F-NEXT:    movfr2gr.s $a2, $fa0
+; LA64F-NEXT:  .LBB18_3: # %atomicrmw.start
+; LA64F-NEXT:    # Parent Loop BB18_1 Depth=1
+; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64F-NEXT:    ll.w $a3, $a0, 0
+; LA64F-NEXT:    bne $a3, $a2, .LBB18_5
+; LA64F-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB18_3 Depth=2
+; LA64F-NEXT:    move $a4, $a1
+; LA64F-NEXT:    sc.w $a4, $a0, 0
+; LA64F-NEXT:    beqz $a4, .LBB18_3
+; LA64F-NEXT:    b .LBB18_6
+; LA64F-NEXT:  .LBB18_5: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB18_1 Depth=1
+; LA64F-NEXT:    dbar 0
+; LA64F-NEXT:  .LBB18_6: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB18_1 Depth=1
+; LA64F-NEXT:    movgr2fr.w $fa0, $a3
+; LA64F-NEXT:    addi.w $a1, $a2, 0
+; LA64F-NEXT:    bne $a3, $a1, .LBB18_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: float_fmin_acq_rel:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    fld.s $fa0, $a0, 0
+; LA64D-NEXT:    addi.w $a1, $zero, 1
+; LA64D-NEXT:    movgr2fr.w $fa1, $a1
+; LA64D-NEXT:    ffint.s.w $fa1, $fa1
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB18_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Loop Header: Depth=1
+; LA64D-NEXT:    # Child Loop BB18_3 Depth 2
+; LA64D-NEXT:    fmax.s $fa2, $fa0, $fa0
+; LA64D-NEXT:    fmin.s $fa2, $fa2, $fa1
+; LA64D-NEXT:    movfr2gr.s $a1, $fa2
+; LA64D-NEXT:    movfr2gr.s $a2, $fa0
+; LA64D-NEXT:  .LBB18_3: # %atomicrmw.start
+; LA64D-NEXT:    # Parent Loop BB18_1 Depth=1
+; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64D-NEXT:    ll.w $a3, $a0, 0
+; LA64D-NEXT:    bne $a3, $a2, .LBB18_5
+; LA64D-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB18_3 Depth=2
+; LA64D-NEXT:    move $a4, $a1
+; LA64D-NEXT:    sc.w $a4, $a0, 0
+; LA64D-NEXT:    beqz $a4, .LBB18_3
+; LA64D-NEXT:    b .LBB18_6
+; LA64D-NEXT:  .LBB18_5: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB18_1 Depth=1
+; LA64D-NEXT:    dbar 0
+; LA64D-NEXT:  .LBB18_6: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB18_1 Depth=1
+; LA64D-NEXT:    movgr2fr.w $fa0, $a3
+; LA64D-NEXT:    addi.w $a1, $a2, 0
+; LA64D-NEXT:    bne $a3, $a1, .LBB18_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    ret
+  %v = atomicrmw fmin ptr %p, float 1.0 acq_rel, align 4
+  ret float %v
+}
+
+define float @float_fmax_acq_rel(ptr %p) nounwind {
+; LA64F-LABEL: float_fmax_acq_rel:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    fld.s $fa0, $a0, 0
+; LA64F-NEXT:    addi.w $a1, $zero, 1
+; LA64F-NEXT:    movgr2fr.w $fa1, $a1
+; LA64F-NEXT:    ffint.s.w $fa1, $fa1
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB19_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Loop Header: Depth=1
+; LA64F-NEXT:    # Child Loop BB19_3 Depth 2
+; LA64F-NEXT:    fmax.s $fa2, $fa0, $fa0
+; LA64F-NEXT:    fmax.s $fa2, $fa2, $fa1
+; LA64F-NEXT:    movfr2gr.s $a1, $fa2
+; LA64F-NEXT:    movfr2gr.s $a2, $fa0
+; LA64F-NEXT:  .LBB19_3: # %atomicrmw.start
+; LA64F-NEXT:    # Parent Loop BB19_1 Depth=1
+; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64F-NEXT:    ll.w $a3, $a0, 0
+; LA64F-NEXT:    bne $a3, $a2, .LBB19_5
+; LA64F-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB19_3 Depth=2
+; LA64F-NEXT:    move $a4, $a1
+; LA64F-NEXT:    sc.w $a4, $a0, 0
+; LA64F-NEXT:    beqz $a4, .LBB19_3
+; LA64F-NEXT:    b .LBB19_6
+; LA64F-NEXT:  .LBB19_5: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB19_1 Depth=1
+; LA64F-NEXT:    dbar 0
+; LA64F-NEXT:  .LBB19_6: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB19_1 Depth=1
+; LA64F-NEXT:    movgr2fr.w $fa0, $a3
+; LA64F-NEXT:    addi.w $a1, $a2, 0
+; LA64F-NEXT:    bne $a3, $a1, .LBB19_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: float_fmax_acq_rel:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    fld.s $fa0, $a0, 0
+; LA64D-NEXT:    addi.w $a1, $zero, 1
+; LA64D-NEXT:    movgr2fr.w $fa1, $a1
+; LA64D-NEXT:    ffint.s.w $fa1, $fa1
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB19_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Loop Header: Depth=1
+; LA64D-NEXT:    # Child Loop BB19_3 Depth 2
+; LA64D-NEXT:    fmax.s $fa2, $fa0, $fa0
+; LA64D-NEXT:    fmax.s $fa2, $fa2, $fa1
+; LA64D-NEXT:    movfr2gr.s $a1, $fa2
+; LA64D-NEXT:    movfr2gr.s $a2, $fa0
+; LA64D-NEXT:  .LBB19_3: # %atomicrmw.start
+; LA64D-NEXT:    # Parent Loop BB19_1 Depth=1
+; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64D-NEXT:    ll.w $a3, $a0, 0
+; LA64D-NEXT:    bne $a3, $a2, .LBB19_5
+; LA64D-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB19_3 Depth=2
+; LA64D-NEXT:    move $a4, $a1
+; LA64D-NEXT:    sc.w $a4, $a0, 0
+; LA64D-NEXT:    beqz $a4, .LBB19_3
+; LA64D-NEXT:    b .LBB19_6
+; LA64D-NEXT:  .LBB19_5: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB19_1 Depth=1
+; LA64D-NEXT:    dbar 0
+; LA64D-NEXT:  .LBB19_6: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB19_1 Depth=1
+; LA64D-NEXT:    movgr2fr.w $fa0, $a3
+; LA64D-NEXT:    addi.w $a1, $a2, 0
+; LA64D-NEXT:    bne $a3, $a1, .LBB19_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    ret
+  %v = atomicrmw fmax ptr %p, float 1.0 acq_rel, align 4
+  ret float %v
+}
+
+define double @double_fadd_acq_rel(ptr %p) nounwind {
+; LA64F-LABEL: double_fadd_acq_rel:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    addi.d $sp, $sp, -80
+; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s5, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    move $fp, $a0
+; LA64F-NEXT:    ld.d $a0, $a0, 0
+; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
+; LA64F-NEXT:    ori $s1, $zero, 8
+; LA64F-NEXT:    addi.d $s2, $sp, 8
+; LA64F-NEXT:    addi.d $s3, $sp, 0
+; LA64F-NEXT:    ori $s4, $zero, 4
+; LA64F-NEXT:    ori $s5, $zero, 2
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB20_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    bl %plt(__adddf3)
+; LA64F-NEXT:    st.d $a0, $sp, 0
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    move $a1, $fp
+; LA64F-NEXT:    move $a2, $s2
+; LA64F-NEXT:    move $a3, $s3
+; LA64F-NEXT:    move $a4, $s4
+; LA64F-NEXT:    move $a5, $s5
+; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64F-NEXT:    move $a1, $a0
+; LA64F-NEXT:    ld.d $a0, $sp, 8
+; LA64F-NEXT:    beqz $a1, .LBB20_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ld.d $s5, $sp, 16 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: double_fadd_acq_rel:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    addi.d $sp, $sp, -80
+; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64D-NEXT:    move $fp, $a0
+; LA64D-NEXT:    fld.d $fa0, $a0, 0
+; LA64D-NEXT:    addi.d $a0, $zero, 1
+; LA64D-NEXT:    movgr2fr.d $fa1, $a0
+; LA64D-NEXT:    ffint.d.l $fs0, $fa1
+; LA64D-NEXT:    ori $s0, $zero, 8
+; LA64D-NEXT:    addi.d $s1, $sp, 8
+; LA64D-NEXT:    addi.d $s2, $sp, 0
+; LA64D-NEXT:    ori $s3, $zero, 4
+; LA64D-NEXT:    ori $s4, $zero, 2
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB20_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64D-NEXT:    fst.d $fa0, $sp, 8
+; LA64D-NEXT:    fadd.d $fa0, $fa0, $fs0
+; LA64D-NEXT:    fst.d $fa0, $sp, 0
+; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    move $a1, $fp
+; LA64D-NEXT:    move $a2, $s1
+; LA64D-NEXT:    move $a3, $s2
+; LA64D-NEXT:    move $a4, $s3
+; LA64D-NEXT:    move $a5, $s4
+; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64D-NEXT:    fld.d $fa0, $sp, 8
+; LA64D-NEXT:    beqz $a0, .LBB20_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ret
+  %v = atomicrmw fadd ptr %p, double 1.0 acq_rel, align 4
+  ret double %v
+}
+
+define double @double_fsub_acq_rel(ptr %p) nounwind {
+; LA64F-LABEL: double_fsub_acq_rel:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    addi.d $sp, $sp, -80
+; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s5, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    move $fp, $a0
+; LA64F-NEXT:    ld.d $a0, $a0, 0
+; LA64F-NEXT:    lu52i.d $s0, $zero, -1025
+; LA64F-NEXT:    ori $s1, $zero, 8
+; LA64F-NEXT:    addi.d $s2, $sp, 8
+; LA64F-NEXT:    addi.d $s3, $sp, 0
+; LA64F-NEXT:    ori $s4, $zero, 4
+; LA64F-NEXT:    ori $s5, $zero, 2
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB21_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    bl %plt(__adddf3)
+; LA64F-NEXT:    st.d $a0, $sp, 0
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    move $a1, $fp
+; LA64F-NEXT:    move $a2, $s2
+; LA64F-NEXT:    move $a3, $s3
+; LA64F-NEXT:    move $a4, $s4
+; LA64F-NEXT:    move $a5, $s5
+; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64F-NEXT:    move $a1, $a0
+; LA64F-NEXT:    ld.d $a0, $sp, 8
+; LA64F-NEXT:    beqz $a1, .LBB21_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ld.d $s5, $sp, 16 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: double_fsub_acq_rel:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    addi.d $sp, $sp, -80
+; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64D-NEXT:    move $fp, $a0
+; LA64D-NEXT:    fld.d $fa0, $a0, 0
+; LA64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI21_0)
+; LA64D-NEXT:    addi.d $a0, $a0, %pc_lo12(.LCPI21_0)
+; LA64D-NEXT:    fld.d $fs0, $a0, 0
+; LA64D-NEXT:    ori $s0, $zero, 8
+; LA64D-NEXT:    addi.d $s1, $sp, 8
+; LA64D-NEXT:    addi.d $s2, $sp, 0
+; LA64D-NEXT:    ori $s3, $zero, 4
+; LA64D-NEXT:    ori $s4, $zero, 2
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB21_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64D-NEXT:    fst.d $fa0, $sp, 8
+; LA64D-NEXT:    fadd.d $fa0, $fa0, $fs0
+; LA64D-NEXT:    fst.d $fa0, $sp, 0
+; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    move $a1, $fp
+; LA64D-NEXT:    move $a2, $s1
+; LA64D-NEXT:    move $a3, $s2
+; LA64D-NEXT:    move $a4, $s3
+; LA64D-NEXT:    move $a5, $s4
+; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64D-NEXT:    fld.d $fa0, $sp, 8
+; LA64D-NEXT:    beqz $a0, .LBB21_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ret
+  %v = atomicrmw fsub ptr %p, double 1.0 acq_rel, align 4
+  ret double %v
+}
+
+define double @double_fmin_acq_rel(ptr %p) nounwind {
+; LA64F-LABEL: double_fmin_acq_rel:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    addi.d $sp, $sp, -80
+; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s5, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    move $fp, $a0
+; LA64F-NEXT:    ld.d $a0, $a0, 0
+; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
+; LA64F-NEXT:    ori $s1, $zero, 8
+; LA64F-NEXT:    addi.d $s2, $sp, 8
+; LA64F-NEXT:    addi.d $s3, $sp, 0
+; LA64F-NEXT:    ori $s4, $zero, 4
+; LA64F-NEXT:    ori $s5, $zero, 2
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB22_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    bl %plt(fmin)
+; LA64F-NEXT:    st.d $a0, $sp, 0
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    move $a1, $fp
+; LA64F-NEXT:    move $a2, $s2
+; LA64F-NEXT:    move $a3, $s3
+; LA64F-NEXT:    move $a4, $s4
+; LA64F-NEXT:    move $a5, $s5
+; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64F-NEXT:    move $a1, $a0
+; LA64F-NEXT:    ld.d $a0, $sp, 8
+; LA64F-NEXT:    beqz $a1, .LBB22_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ld.d $s5, $sp, 16 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: double_fmin_acq_rel:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    addi.d $sp, $sp, -80
+; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64D-NEXT:    move $fp, $a0
+; LA64D-NEXT:    fld.d $fa0, $a0, 0
+; LA64D-NEXT:    addi.d $a0, $zero, 1
+; LA64D-NEXT:    movgr2fr.d $fa1, $a0
+; LA64D-NEXT:    ffint.d.l $fs0, $fa1
+; LA64D-NEXT:    ori $s0, $zero, 8
+; LA64D-NEXT:    addi.d $s1, $sp, 8
+; LA64D-NEXT:    addi.d $s2, $sp, 0
+; LA64D-NEXT:    ori $s3, $zero, 4
+; LA64D-NEXT:    ori $s4, $zero, 2
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB22_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64D-NEXT:    fst.d $fa0, $sp, 8
+; LA64D-NEXT:    fmax.d $fa0, $fa0, $fa0
+; LA64D-NEXT:    fmin.d $fa0, $fa0, $fs0
+; LA64D-NEXT:    fst.d $fa0, $sp, 0
+; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    move $a1, $fp
+; LA64D-NEXT:    move $a2, $s1
+; LA64D-NEXT:    move $a3, $s2
+; LA64D-NEXT:    move $a4, $s3
+; LA64D-NEXT:    move $a5, $s4
+; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64D-NEXT:    fld.d $fa0, $sp, 8
+; LA64D-NEXT:    beqz $a0, .LBB22_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ret
+  %v = atomicrmw fmin ptr %p, double 1.0 acq_rel, align 4
+  ret double %v
+}
+
+define double @double_fmax_acq_rel(ptr %p) nounwind {
+; LA64F-LABEL: double_fmax_acq_rel:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    addi.d $sp, $sp, -80
+; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s5, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    move $fp, $a0
+; LA64F-NEXT:    ld.d $a0, $a0, 0
+; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
+; LA64F-NEXT:    ori $s1, $zero, 8
+; LA64F-NEXT:    addi.d $s2, $sp, 8
+; LA64F-NEXT:    addi.d $s3, $sp, 0
+; LA64F-NEXT:    ori $s4, $zero, 4
+; LA64F-NEXT:    ori $s5, $zero, 2
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB23_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    bl %plt(fmax)
+; LA64F-NEXT:    st.d $a0, $sp, 0
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    move $a1, $fp
+; LA64F-NEXT:    move $a2, $s2
+; LA64F-NEXT:    move $a3, $s3
+; LA64F-NEXT:    move $a4, $s4
+; LA64F-NEXT:    move $a5, $s5
+; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64F-NEXT:    move $a1, $a0
+; LA64F-NEXT:    ld.d $a0, $sp, 8
+; LA64F-NEXT:    beqz $a1, .LBB23_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ld.d $s5, $sp, 16 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: double_fmax_acq_rel:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    addi.d $sp, $sp, -80
+; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64D-NEXT:    move $fp, $a0
+; LA64D-NEXT:    fld.d $fa0, $a0, 0
+; LA64D-NEXT:    addi.d $a0, $zero, 1
+; LA64D-NEXT:    movgr2fr.d $fa1, $a0
+; LA64D-NEXT:    ffint.d.l $fs0, $fa1
+; LA64D-NEXT:    ori $s0, $zero, 8
+; LA64D-NEXT:    addi.d $s1, $sp, 8
+; LA64D-NEXT:    addi.d $s2, $sp, 0
+; LA64D-NEXT:    ori $s3, $zero, 4
+; LA64D-NEXT:    ori $s4, $zero, 2
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB23_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64D-NEXT:    fst.d $fa0, $sp, 8
+; LA64D-NEXT:    fmax.d $fa0, $fa0, $fa0
+; LA64D-NEXT:    fmax.d $fa0, $fa0, $fs0
+; LA64D-NEXT:    fst.d $fa0, $sp, 0
+; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    move $a1, $fp
+; LA64D-NEXT:    move $a2, $s1
+; LA64D-NEXT:    move $a3, $s2
+; LA64D-NEXT:    move $a4, $s3
+; LA64D-NEXT:    move $a5, $s4
+; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64D-NEXT:    fld.d $fa0, $sp, 8
+; LA64D-NEXT:    beqz $a0, .LBB23_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ret
+  %v = atomicrmw fmax ptr %p, double 1.0 acq_rel, align 4
+  ret double %v
+}
+
+define float @float_fadd_seq_cst(ptr %p) nounwind {
+; LA64F-LABEL: float_fadd_seq_cst:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    fld.s $fa0, $a0, 0
+; LA64F-NEXT:    addi.w $a1, $zero, 1
+; LA64F-NEXT:    movgr2fr.w $fa1, $a1
+; LA64F-NEXT:    ffint.s.w $fa1, $fa1
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB24_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Loop Header: Depth=1
+; LA64F-NEXT:    # Child Loop BB24_3 Depth 2
+; LA64F-NEXT:    fadd.s $fa2, $fa0, $fa1
+; LA64F-NEXT:    movfr2gr.s $a1, $fa2
+; LA64F-NEXT:    movfr2gr.s $a2, $fa0
+; LA64F-NEXT:  .LBB24_3: # %atomicrmw.start
+; LA64F-NEXT:    # Parent Loop BB24_1 Depth=1
+; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64F-NEXT:    ll.w $a3, $a0, 0
+; LA64F-NEXT:    bne $a3, $a2, .LBB24_5
+; LA64F-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB24_3 Depth=2
+; LA64F-NEXT:    move $a4, $a1
+; LA64F-NEXT:    sc.w $a4, $a0, 0
+; LA64F-NEXT:    beqz $a4, .LBB24_3
+; LA64F-NEXT:    b .LBB24_6
+; LA64F-NEXT:  .LBB24_5: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB24_1 Depth=1
+; LA64F-NEXT:    dbar 0
+; LA64F-NEXT:  .LBB24_6: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB24_1 Depth=1
+; LA64F-NEXT:    movgr2fr.w $fa0, $a3
+; LA64F-NEXT:    addi.w $a1, $a2, 0
+; LA64F-NEXT:    bne $a3, $a1, .LBB24_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: float_fadd_seq_cst:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    fld.s $fa0, $a0, 0
+; LA64D-NEXT:    addi.w $a1, $zero, 1
+; LA64D-NEXT:    movgr2fr.w $fa1, $a1
+; LA64D-NEXT:    ffint.s.w $fa1, $fa1
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB24_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Loop Header: Depth=1
+; LA64D-NEXT:    # Child Loop BB24_3 Depth 2
+; LA64D-NEXT:    fadd.s $fa2, $fa0, $fa1
+; LA64D-NEXT:    movfr2gr.s $a1, $fa2
+; LA64D-NEXT:    movfr2gr.s $a2, $fa0
+; LA64D-NEXT:  .LBB24_3: # %atomicrmw.start
+; LA64D-NEXT:    # Parent Loop BB24_1 Depth=1
+; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64D-NEXT:    ll.w $a3, $a0, 0
+; LA64D-NEXT:    bne $a3, $a2, .LBB24_5
+; LA64D-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB24_3 Depth=2
+; LA64D-NEXT:    move $a4, $a1
+; LA64D-NEXT:    sc.w $a4, $a0, 0
+; LA64D-NEXT:    beqz $a4, .LBB24_3
+; LA64D-NEXT:    b .LBB24_6
+; LA64D-NEXT:  .LBB24_5: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB24_1 Depth=1
+; LA64D-NEXT:    dbar 0
+; LA64D-NEXT:  .LBB24_6: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB24_1 Depth=1
+; LA64D-NEXT:    movgr2fr.w $fa0, $a3
+; LA64D-NEXT:    addi.w $a1, $a2, 0
+; LA64D-NEXT:    bne $a3, $a1, .LBB24_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    ret
+  %v = atomicrmw fadd ptr %p, float 1.0 seq_cst, align 4
+  ret float %v
+}
+
+define float @float_fsub_seq_cst(ptr %p) nounwind {
+; LA64F-LABEL: float_fsub_seq_cst:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    fld.s $fa0, $a0, 0
+; LA64F-NEXT:    pcalau12i $a1, %pc_hi20(.LCPI25_0)
+; LA64F-NEXT:    addi.d $a1, $a1, %pc_lo12(.LCPI25_0)
+; LA64F-NEXT:    fld.s $fa1, $a1, 0
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB25_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Loop Header: Depth=1
+; LA64F-NEXT:    # Child Loop BB25_3 Depth 2
+; LA64F-NEXT:    fadd.s $fa2, $fa0, $fa1
+; LA64F-NEXT:    movfr2gr.s $a1, $fa2
+; LA64F-NEXT:    movfr2gr.s $a2, $fa0
+; LA64F-NEXT:  .LBB25_3: # %atomicrmw.start
+; LA64F-NEXT:    # Parent Loop BB25_1 Depth=1
+; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64F-NEXT:    ll.w $a3, $a0, 0
+; LA64F-NEXT:    bne $a3, $a2, .LBB25_5
+; LA64F-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB25_3 Depth=2
+; LA64F-NEXT:    move $a4, $a1
+; LA64F-NEXT:    sc.w $a4, $a0, 0
+; LA64F-NEXT:    beqz $a4, .LBB25_3
+; LA64F-NEXT:    b .LBB25_6
+; LA64F-NEXT:  .LBB25_5: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB25_1 Depth=1
+; LA64F-NEXT:    dbar 0
+; LA64F-NEXT:  .LBB25_6: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB25_1 Depth=1
+; LA64F-NEXT:    movgr2fr.w $fa0, $a3
+; LA64F-NEXT:    addi.w $a1, $a2, 0
+; LA64F-NEXT:    bne $a3, $a1, .LBB25_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: float_fsub_seq_cst:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    fld.s $fa0, $a0, 0
+; LA64D-NEXT:    pcalau12i $a1, %pc_hi20(.LCPI25_0)
+; LA64D-NEXT:    addi.d $a1, $a1, %pc_lo12(.LCPI25_0)
+; LA64D-NEXT:    fld.s $fa1, $a1, 0
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB25_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Loop Header: Depth=1
+; LA64D-NEXT:    # Child Loop BB25_3 Depth 2
+; LA64D-NEXT:    fadd.s $fa2, $fa0, $fa1
+; LA64D-NEXT:    movfr2gr.s $a1, $fa2
+; LA64D-NEXT:    movfr2gr.s $a2, $fa0
+; LA64D-NEXT:  .LBB25_3: # %atomicrmw.start
+; LA64D-NEXT:    # Parent Loop BB25_1 Depth=1
+; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64D-NEXT:    ll.w $a3, $a0, 0
+; LA64D-NEXT:    bne $a3, $a2, .LBB25_5
+; LA64D-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB25_3 Depth=2
+; LA64D-NEXT:    move $a4, $a1
+; LA64D-NEXT:    sc.w $a4, $a0, 0
+; LA64D-NEXT:    beqz $a4, .LBB25_3
+; LA64D-NEXT:    b .LBB25_6
+; LA64D-NEXT:  .LBB25_5: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB25_1 Depth=1
+; LA64D-NEXT:    dbar 0
+; LA64D-NEXT:  .LBB25_6: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB25_1 Depth=1
+; LA64D-NEXT:    movgr2fr.w $fa0, $a3
+; LA64D-NEXT:    addi.w $a1, $a2, 0
+; LA64D-NEXT:    bne $a3, $a1, .LBB25_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    ret
+  %v = atomicrmw fsub ptr %p, float 1.0 seq_cst, align 4
+  ret float %v
+}
+
+define float @float_fmin_seq_cst(ptr %p) nounwind {
+; LA64F-LABEL: float_fmin_seq_cst:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    fld.s $fa0, $a0, 0
+; LA64F-NEXT:    addi.w $a1, $zero, 1
+; LA64F-NEXT:    movgr2fr.w $fa1, $a1
+; LA64F-NEXT:    ffint.s.w $fa1, $fa1
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB26_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Loop Header: Depth=1
+; LA64F-NEXT:    # Child Loop BB26_3 Depth 2
+; LA64F-NEXT:    fmax.s $fa2, $fa0, $fa0
+; LA64F-NEXT:    fmin.s $fa2, $fa2, $fa1
+; LA64F-NEXT:    movfr2gr.s $a1, $fa2
+; LA64F-NEXT:    movfr2gr.s $a2, $fa0
+; LA64F-NEXT:  .LBB26_3: # %atomicrmw.start
+; LA64F-NEXT:    # Parent Loop BB26_1 Depth=1
+; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64F-NEXT:    ll.w $a3, $a0, 0
+; LA64F-NEXT:    bne $a3, $a2, .LBB26_5
+; LA64F-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB26_3 Depth=2
+; LA64F-NEXT:    move $a4, $a1
+; LA64F-NEXT:    sc.w $a4, $a0, 0
+; LA64F-NEXT:    beqz $a4, .LBB26_3
+; LA64F-NEXT:    b .LBB26_6
+; LA64F-NEXT:  .LBB26_5: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB26_1 Depth=1
+; LA64F-NEXT:    dbar 0
+; LA64F-NEXT:  .LBB26_6: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB26_1 Depth=1
+; LA64F-NEXT:    movgr2fr.w $fa0, $a3
+; LA64F-NEXT:    addi.w $a1, $a2, 0
+; LA64F-NEXT:    bne $a3, $a1, .LBB26_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: float_fmin_seq_cst:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    fld.s $fa0, $a0, 0
+; LA64D-NEXT:    addi.w $a1, $zero, 1
+; LA64D-NEXT:    movgr2fr.w $fa1, $a1
+; LA64D-NEXT:    ffint.s.w $fa1, $fa1
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB26_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Loop Header: Depth=1
+; LA64D-NEXT:    # Child Loop BB26_3 Depth 2
+; LA64D-NEXT:    fmax.s $fa2, $fa0, $fa0
+; LA64D-NEXT:    fmin.s $fa2, $fa2, $fa1
+; LA64D-NEXT:    movfr2gr.s $a1, $fa2
+; LA64D-NEXT:    movfr2gr.s $a2, $fa0
+; LA64D-NEXT:  .LBB26_3: # %atomicrmw.start
+; LA64D-NEXT:    # Parent Loop BB26_1 Depth=1
+; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64D-NEXT:    ll.w $a3, $a0, 0
+; LA64D-NEXT:    bne $a3, $a2, .LBB26_5
+; LA64D-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB26_3 Depth=2
+; LA64D-NEXT:    move $a4, $a1
+; LA64D-NEXT:    sc.w $a4, $a0, 0
+; LA64D-NEXT:    beqz $a4, .LBB26_3
+; LA64D-NEXT:    b .LBB26_6
+; LA64D-NEXT:  .LBB26_5: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB26_1 Depth=1
+; LA64D-NEXT:    dbar 0
+; LA64D-NEXT:  .LBB26_6: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB26_1 Depth=1
+; LA64D-NEXT:    movgr2fr.w $fa0, $a3
+; LA64D-NEXT:    addi.w $a1, $a2, 0
+; LA64D-NEXT:    bne $a3, $a1, .LBB26_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    ret
+  %v = atomicrmw fmin ptr %p, float 1.0 seq_cst, align 4
+  ret float %v
+}
+
+define float @float_fmax_seq_cst(ptr %p) nounwind {
+; LA64F-LABEL: float_fmax_seq_cst:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    fld.s $fa0, $a0, 0
+; LA64F-NEXT:    addi.w $a1, $zero, 1
+; LA64F-NEXT:    movgr2fr.w $fa1, $a1
+; LA64F-NEXT:    ffint.s.w $fa1, $fa1
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB27_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Loop Header: Depth=1
+; LA64F-NEXT:    # Child Loop BB27_3 Depth 2
+; LA64F-NEXT:    fmax.s $fa2, $fa0, $fa0
+; LA64F-NEXT:    fmax.s $fa2, $fa2, $fa1
+; LA64F-NEXT:    movfr2gr.s $a1, $fa2
+; LA64F-NEXT:    movfr2gr.s $a2, $fa0
+; LA64F-NEXT:  .LBB27_3: # %atomicrmw.start
+; LA64F-NEXT:    # Parent Loop BB27_1 Depth=1
+; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64F-NEXT:    ll.w $a3, $a0, 0
+; LA64F-NEXT:    bne $a3, $a2, .LBB27_5
+; LA64F-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB27_3 Depth=2
+; LA64F-NEXT:    move $a4, $a1
+; LA64F-NEXT:    sc.w $a4, $a0, 0
+; LA64F-NEXT:    beqz $a4, .LBB27_3
+; LA64F-NEXT:    b .LBB27_6
+; LA64F-NEXT:  .LBB27_5: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB27_1 Depth=1
+; LA64F-NEXT:    dbar 0
+; LA64F-NEXT:  .LBB27_6: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB27_1 Depth=1
+; LA64F-NEXT:    movgr2fr.w $fa0, $a3
+; LA64F-NEXT:    addi.w $a1, $a2, 0
+; LA64F-NEXT:    bne $a3, $a1, .LBB27_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: float_fmax_seq_cst:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    fld.s $fa0, $a0, 0
+; LA64D-NEXT:    addi.w $a1, $zero, 1
+; LA64D-NEXT:    movgr2fr.w $fa1, $a1
+; LA64D-NEXT:    ffint.s.w $fa1, $fa1
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB27_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Loop Header: Depth=1
+; LA64D-NEXT:    # Child Loop BB27_3 Depth 2
+; LA64D-NEXT:    fmax.s $fa2, $fa0, $fa0
+; LA64D-NEXT:    fmax.s $fa2, $fa2, $fa1
+; LA64D-NEXT:    movfr2gr.s $a1, $fa2
+; LA64D-NEXT:    movfr2gr.s $a2, $fa0
+; LA64D-NEXT:  .LBB27_3: # %atomicrmw.start
+; LA64D-NEXT:    # Parent Loop BB27_1 Depth=1
+; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64D-NEXT:    ll.w $a3, $a0, 0
+; LA64D-NEXT:    bne $a3, $a2, .LBB27_5
+; LA64D-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB27_3 Depth=2
+; LA64D-NEXT:    move $a4, $a1
+; LA64D-NEXT:    sc.w $a4, $a0, 0
+; LA64D-NEXT:    beqz $a4, .LBB27_3
+; LA64D-NEXT:    b .LBB27_6
+; LA64D-NEXT:  .LBB27_5: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB27_1 Depth=1
+; LA64D-NEXT:    dbar 0
+; LA64D-NEXT:  .LBB27_6: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB27_1 Depth=1
+; LA64D-NEXT:    movgr2fr.w $fa0, $a3
+; LA64D-NEXT:    addi.w $a1, $a2, 0
+; LA64D-NEXT:    bne $a3, $a1, .LBB27_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    ret
+  %v = atomicrmw fmax ptr %p, float 1.0 seq_cst, align 4
+  ret float %v
+}
+
+define double @double_fadd_seq_cst(ptr %p) nounwind {
+; LA64F-LABEL: double_fadd_seq_cst:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    addi.d $sp, $sp, -80
+; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    move $fp, $a0
+; LA64F-NEXT:    ld.d $a0, $a0, 0
+; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
+; LA64F-NEXT:    ori $s1, $zero, 8
+; LA64F-NEXT:    addi.d $s2, $sp, 16
+; LA64F-NEXT:    addi.d $s3, $sp, 8
+; LA64F-NEXT:    ori $s4, $zero, 5
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB28_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64F-NEXT:    st.d $a0, $sp, 16
+; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    bl %plt(__adddf3)
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    move $a1, $fp
+; LA64F-NEXT:    move $a2, $s2
+; LA64F-NEXT:    move $a3, $s3
+; LA64F-NEXT:    move $a4, $s4
+; LA64F-NEXT:    move $a5, $s4
+; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64F-NEXT:    move $a1, $a0
+; LA64F-NEXT:    ld.d $a0, $sp, 16
+; LA64F-NEXT:    beqz $a1, .LBB28_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: double_fadd_seq_cst:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    addi.d $sp, $sp, -80
+; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
+; LA64D-NEXT:    move $fp, $a0
+; LA64D-NEXT:    fld.d $fa0, $a0, 0
+; LA64D-NEXT:    addi.d $a0, $zero, 1
+; LA64D-NEXT:    movgr2fr.d $fa1, $a0
+; LA64D-NEXT:    ffint.d.l $fs0, $fa1
+; LA64D-NEXT:    ori $s0, $zero, 8
+; LA64D-NEXT:    addi.d $s1, $sp, 16
+; LA64D-NEXT:    addi.d $s2, $sp, 8
+; LA64D-NEXT:    ori $s3, $zero, 5
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB28_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64D-NEXT:    fst.d $fa0, $sp, 16
+; LA64D-NEXT:    fadd.d $fa0, $fa0, $fs0
+; LA64D-NEXT:    fst.d $fa0, $sp, 8
+; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    move $a1, $fp
+; LA64D-NEXT:    move $a2, $s1
+; LA64D-NEXT:    move $a3, $s2
+; LA64D-NEXT:    move $a4, $s3
+; LA64D-NEXT:    move $a5, $s3
+; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64D-NEXT:    fld.d $fa0, $sp, 16
+; LA64D-NEXT:    beqz $a0, .LBB28_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ret
+  %v = atomicrmw fadd ptr %p, double 1.0 seq_cst, align 4
+  ret double %v
+}
+
+define double @double_fsub_seq_cst(ptr %p) nounwind {
+; LA64F-LABEL: double_fsub_seq_cst:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    addi.d $sp, $sp, -80
+; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    move $fp, $a0
+; LA64F-NEXT:    ld.d $a0, $a0, 0
+; LA64F-NEXT:    lu52i.d $s0, $zero, -1025
+; LA64F-NEXT:    ori $s1, $zero, 8
+; LA64F-NEXT:    addi.d $s2, $sp, 16
+; LA64F-NEXT:    addi.d $s3, $sp, 8
+; LA64F-NEXT:    ori $s4, $zero, 5
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB29_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64F-NEXT:    st.d $a0, $sp, 16
+; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    bl %plt(__adddf3)
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    move $a1, $fp
+; LA64F-NEXT:    move $a2, $s2
+; LA64F-NEXT:    move $a3, $s3
+; LA64F-NEXT:    move $a4, $s4
+; LA64F-NEXT:    move $a5, $s4
+; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64F-NEXT:    move $a1, $a0
+; LA64F-NEXT:    ld.d $a0, $sp, 16
+; LA64F-NEXT:    beqz $a1, .LBB29_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: double_fsub_seq_cst:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    addi.d $sp, $sp, -80
+; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
+; LA64D-NEXT:    move $fp, $a0
+; LA64D-NEXT:    fld.d $fa0, $a0, 0
+; LA64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI29_0)
+; LA64D-NEXT:    addi.d $a0, $a0, %pc_lo12(.LCPI29_0)
+; LA64D-NEXT:    fld.d $fs0, $a0, 0
+; LA64D-NEXT:    ori $s0, $zero, 8
+; LA64D-NEXT:    addi.d $s1, $sp, 16
+; LA64D-NEXT:    addi.d $s2, $sp, 8
+; LA64D-NEXT:    ori $s3, $zero, 5
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB29_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64D-NEXT:    fst.d $fa0, $sp, 16
+; LA64D-NEXT:    fadd.d $fa0, $fa0, $fs0
+; LA64D-NEXT:    fst.d $fa0, $sp, 8
+; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    move $a1, $fp
+; LA64D-NEXT:    move $a2, $s1
+; LA64D-NEXT:    move $a3, $s2
+; LA64D-NEXT:    move $a4, $s3
+; LA64D-NEXT:    move $a5, $s3
+; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64D-NEXT:    fld.d $fa0, $sp, 16
+; LA64D-NEXT:    beqz $a0, .LBB29_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ret
+  %v = atomicrmw fsub ptr %p, double 1.0 seq_cst, align 4
+  ret double %v
+}
+
+define double @double_fmin_seq_cst(ptr %p) nounwind {
+; LA64F-LABEL: double_fmin_seq_cst:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    addi.d $sp, $sp, -80
+; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    move $fp, $a0
+; LA64F-NEXT:    ld.d $a0, $a0, 0
+; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
+; LA64F-NEXT:    ori $s1, $zero, 8
+; LA64F-NEXT:    addi.d $s2, $sp, 16
+; LA64F-NEXT:    addi.d $s3, $sp, 8
+; LA64F-NEXT:    ori $s4, $zero, 5
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB30_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64F-NEXT:    st.d $a0, $sp, 16
+; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    bl %plt(fmin)
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    move $a1, $fp
+; LA64F-NEXT:    move $a2, $s2
+; LA64F-NEXT:    move $a3, $s3
+; LA64F-NEXT:    move $a4, $s4
+; LA64F-NEXT:    move $a5, $s4
+; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64F-NEXT:    move $a1, $a0
+; LA64F-NEXT:    ld.d $a0, $sp, 16
+; LA64F-NEXT:    beqz $a1, .LBB30_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: double_fmin_seq_cst:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    addi.d $sp, $sp, -80
+; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
+; LA64D-NEXT:    move $fp, $a0
+; LA64D-NEXT:    fld.d $fa0, $a0, 0
+; LA64D-NEXT:    addi.d $a0, $zero, 1
+; LA64D-NEXT:    movgr2fr.d $fa1, $a0
+; LA64D-NEXT:    ffint.d.l $fs0, $fa1
+; LA64D-NEXT:    ori $s0, $zero, 8
+; LA64D-NEXT:    addi.d $s1, $sp, 16
+; LA64D-NEXT:    addi.d $s2, $sp, 8
+; LA64D-NEXT:    ori $s3, $zero, 5
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB30_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64D-NEXT:    fst.d $fa0, $sp, 16
+; LA64D-NEXT:    fmax.d $fa0, $fa0, $fa0
+; LA64D-NEXT:    fmin.d $fa0, $fa0, $fs0
+; LA64D-NEXT:    fst.d $fa0, $sp, 8
+; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    move $a1, $fp
+; LA64D-NEXT:    move $a2, $s1
+; LA64D-NEXT:    move $a3, $s2
+; LA64D-NEXT:    move $a4, $s3
+; LA64D-NEXT:    move $a5, $s3
+; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64D-NEXT:    fld.d $fa0, $sp, 16
+; LA64D-NEXT:    beqz $a0, .LBB30_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ret
+  %v = atomicrmw fmin ptr %p, double 1.0 seq_cst, align 4
+  ret double %v
+}
+
+define double @double_fmax_seq_cst(ptr %p) nounwind {
+; LA64F-LABEL: double_fmax_seq_cst:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    addi.d $sp, $sp, -80
+; LA64F-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s4, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    move $fp, $a0
+; LA64F-NEXT:    ld.d $a0, $a0, 0
+; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
+; LA64F-NEXT:    ori $s1, $zero, 8
+; LA64F-NEXT:    addi.d $s2, $sp, 16
+; LA64F-NEXT:    addi.d $s3, $sp, 8
+; LA64F-NEXT:    ori $s4, $zero, 5
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB31_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64F-NEXT:    st.d $a0, $sp, 16
+; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    bl %plt(fmax)
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    move $a1, $fp
+; LA64F-NEXT:    move $a2, $s2
+; LA64F-NEXT:    move $a3, $s3
+; LA64F-NEXT:    move $a4, $s4
+; LA64F-NEXT:    move $a5, $s4
+; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64F-NEXT:    move $a1, $a0
+; LA64F-NEXT:    ld.d $a0, $sp, 16
+; LA64F-NEXT:    beqz $a1, .LBB31_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ld.d $s4, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 80
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: double_fmax_seq_cst:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    addi.d $sp, $sp, -80
+; LA64D-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 64 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s0, $sp, 56 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s1, $sp, 48 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s2, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s3, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 24 # 8-byte Folded Spill
+; LA64D-NEXT:    move $fp, $a0
+; LA64D-NEXT:    fld.d $fa0, $a0, 0
+; LA64D-NEXT:    addi.d $a0, $zero, 1
+; LA64D-NEXT:    movgr2fr.d $fa1, $a0
+; LA64D-NEXT:    ffint.d.l $fs0, $fa1
+; LA64D-NEXT:    ori $s0, $zero, 8
+; LA64D-NEXT:    addi.d $s1, $sp, 16
+; LA64D-NEXT:    addi.d $s2, $sp, 8
+; LA64D-NEXT:    ori $s3, $zero, 5
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB31_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64D-NEXT:    fst.d $fa0, $sp, 16
+; LA64D-NEXT:    fmax.d $fa0, $fa0, $fa0
+; LA64D-NEXT:    fmax.d $fa0, $fa0, $fs0
+; LA64D-NEXT:    fst.d $fa0, $sp, 8
+; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    move $a1, $fp
+; LA64D-NEXT:    move $a2, $s1
+; LA64D-NEXT:    move $a3, $s2
+; LA64D-NEXT:    move $a4, $s3
+; LA64D-NEXT:    move $a5, $s3
+; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64D-NEXT:    fld.d $fa0, $sp, 16
+; LA64D-NEXT:    beqz $a0, .LBB31_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    fld.d $fs0, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s3, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s2, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s1, $sp, 48 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s0, $sp, 56 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 64 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 80
+; LA64D-NEXT:    ret
+  %v = atomicrmw fmax ptr %p, double 1.0 seq_cst, align 4
+  ret double %v
+}
+
+define float @float_fadd_monotonic(ptr %p) nounwind {
+; LA64F-LABEL: float_fadd_monotonic:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    fld.s $fa0, $a0, 0
+; LA64F-NEXT:    addi.w $a1, $zero, 1
+; LA64F-NEXT:    movgr2fr.w $fa1, $a1
+; LA64F-NEXT:    ffint.s.w $fa1, $fa1
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB32_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Loop Header: Depth=1
+; LA64F-NEXT:    # Child Loop BB32_3 Depth 2
+; LA64F-NEXT:    fadd.s $fa2, $fa0, $fa1
+; LA64F-NEXT:    movfr2gr.s $a1, $fa2
+; LA64F-NEXT:    movfr2gr.s $a2, $fa0
+; LA64F-NEXT:  .LBB32_3: # %atomicrmw.start
+; LA64F-NEXT:    # Parent Loop BB32_1 Depth=1
+; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64F-NEXT:    ll.w $a3, $a0, 0
+; LA64F-NEXT:    bne $a3, $a2, .LBB32_5
+; LA64F-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB32_3 Depth=2
+; LA64F-NEXT:    move $a4, $a1
+; LA64F-NEXT:    sc.w $a4, $a0, 0
+; LA64F-NEXT:    beqz $a4, .LBB32_3
+; LA64F-NEXT:    b .LBB32_6
+; LA64F-NEXT:  .LBB32_5: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB32_1 Depth=1
+; LA64F-NEXT:    dbar 1792
+; LA64F-NEXT:  .LBB32_6: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB32_1 Depth=1
+; LA64F-NEXT:    movgr2fr.w $fa0, $a3
+; LA64F-NEXT:    addi.w $a1, $a2, 0
+; LA64F-NEXT:    bne $a3, $a1, .LBB32_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: float_fadd_monotonic:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    fld.s $fa0, $a0, 0
+; LA64D-NEXT:    addi.w $a1, $zero, 1
+; LA64D-NEXT:    movgr2fr.w $fa1, $a1
+; LA64D-NEXT:    ffint.s.w $fa1, $fa1
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB32_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Loop Header: Depth=1
+; LA64D-NEXT:    # Child Loop BB32_3 Depth 2
+; LA64D-NEXT:    fadd.s $fa2, $fa0, $fa1
+; LA64D-NEXT:    movfr2gr.s $a1, $fa2
+; LA64D-NEXT:    movfr2gr.s $a2, $fa0
+; LA64D-NEXT:  .LBB32_3: # %atomicrmw.start
+; LA64D-NEXT:    # Parent Loop BB32_1 Depth=1
+; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64D-NEXT:    ll.w $a3, $a0, 0
+; LA64D-NEXT:    bne $a3, $a2, .LBB32_5
+; LA64D-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB32_3 Depth=2
+; LA64D-NEXT:    move $a4, $a1
+; LA64D-NEXT:    sc.w $a4, $a0, 0
+; LA64D-NEXT:    beqz $a4, .LBB32_3
+; LA64D-NEXT:    b .LBB32_6
+; LA64D-NEXT:  .LBB32_5: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB32_1 Depth=1
+; LA64D-NEXT:    dbar 1792
+; LA64D-NEXT:  .LBB32_6: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB32_1 Depth=1
+; LA64D-NEXT:    movgr2fr.w $fa0, $a3
+; LA64D-NEXT:    addi.w $a1, $a2, 0
+; LA64D-NEXT:    bne $a3, $a1, .LBB32_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    ret
+  %v = atomicrmw fadd ptr %p, float 1.0 monotonic, align 4
+  ret float %v
+}
+
+define float @float_fsub_monotonic(ptr %p) nounwind {
+; LA64F-LABEL: float_fsub_monotonic:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    fld.s $fa0, $a0, 0
+; LA64F-NEXT:    pcalau12i $a1, %pc_hi20(.LCPI33_0)
+; LA64F-NEXT:    addi.d $a1, $a1, %pc_lo12(.LCPI33_0)
+; LA64F-NEXT:    fld.s $fa1, $a1, 0
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB33_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Loop Header: Depth=1
+; LA64F-NEXT:    # Child Loop BB33_3 Depth 2
+; LA64F-NEXT:    fadd.s $fa2, $fa0, $fa1
+; LA64F-NEXT:    movfr2gr.s $a1, $fa2
+; LA64F-NEXT:    movfr2gr.s $a2, $fa0
+; LA64F-NEXT:  .LBB33_3: # %atomicrmw.start
+; LA64F-NEXT:    # Parent Loop BB33_1 Depth=1
+; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64F-NEXT:    ll.w $a3, $a0, 0
+; LA64F-NEXT:    bne $a3, $a2, .LBB33_5
+; LA64F-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB33_3 Depth=2
+; LA64F-NEXT:    move $a4, $a1
+; LA64F-NEXT:    sc.w $a4, $a0, 0
+; LA64F-NEXT:    beqz $a4, .LBB33_3
+; LA64F-NEXT:    b .LBB33_6
+; LA64F-NEXT:  .LBB33_5: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB33_1 Depth=1
+; LA64F-NEXT:    dbar 1792
+; LA64F-NEXT:  .LBB33_6: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB33_1 Depth=1
+; LA64F-NEXT:    movgr2fr.w $fa0, $a3
+; LA64F-NEXT:    addi.w $a1, $a2, 0
+; LA64F-NEXT:    bne $a3, $a1, .LBB33_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: float_fsub_monotonic:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    fld.s $fa0, $a0, 0
+; LA64D-NEXT:    pcalau12i $a1, %pc_hi20(.LCPI33_0)
+; LA64D-NEXT:    addi.d $a1, $a1, %pc_lo12(.LCPI33_0)
+; LA64D-NEXT:    fld.s $fa1, $a1, 0
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB33_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Loop Header: Depth=1
+; LA64D-NEXT:    # Child Loop BB33_3 Depth 2
+; LA64D-NEXT:    fadd.s $fa2, $fa0, $fa1
+; LA64D-NEXT:    movfr2gr.s $a1, $fa2
+; LA64D-NEXT:    movfr2gr.s $a2, $fa0
+; LA64D-NEXT:  .LBB33_3: # %atomicrmw.start
+; LA64D-NEXT:    # Parent Loop BB33_1 Depth=1
+; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64D-NEXT:    ll.w $a3, $a0, 0
+; LA64D-NEXT:    bne $a3, $a2, .LBB33_5
+; LA64D-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB33_3 Depth=2
+; LA64D-NEXT:    move $a4, $a1
+; LA64D-NEXT:    sc.w $a4, $a0, 0
+; LA64D-NEXT:    beqz $a4, .LBB33_3
+; LA64D-NEXT:    b .LBB33_6
+; LA64D-NEXT:  .LBB33_5: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB33_1 Depth=1
+; LA64D-NEXT:    dbar 1792
+; LA64D-NEXT:  .LBB33_6: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB33_1 Depth=1
+; LA64D-NEXT:    movgr2fr.w $fa0, $a3
+; LA64D-NEXT:    addi.w $a1, $a2, 0
+; LA64D-NEXT:    bne $a3, $a1, .LBB33_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    ret
+  %v = atomicrmw fsub ptr %p, float 1.0 monotonic, align 4
+  ret float %v
+}
+
+define float @float_fmin_monotonic(ptr %p) nounwind {
+; LA64F-LABEL: float_fmin_monotonic:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    fld.s $fa0, $a0, 0
+; LA64F-NEXT:    addi.w $a1, $zero, 1
+; LA64F-NEXT:    movgr2fr.w $fa1, $a1
+; LA64F-NEXT:    ffint.s.w $fa1, $fa1
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB34_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Loop Header: Depth=1
+; LA64F-NEXT:    # Child Loop BB34_3 Depth 2
+; LA64F-NEXT:    fmax.s $fa2, $fa0, $fa0
+; LA64F-NEXT:    fmin.s $fa2, $fa2, $fa1
+; LA64F-NEXT:    movfr2gr.s $a1, $fa2
+; LA64F-NEXT:    movfr2gr.s $a2, $fa0
+; LA64F-NEXT:  .LBB34_3: # %atomicrmw.start
+; LA64F-NEXT:    # Parent Loop BB34_1 Depth=1
+; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64F-NEXT:    ll.w $a3, $a0, 0
+; LA64F-NEXT:    bne $a3, $a2, .LBB34_5
+; LA64F-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB34_3 Depth=2
+; LA64F-NEXT:    move $a4, $a1
+; LA64F-NEXT:    sc.w $a4, $a0, 0
+; LA64F-NEXT:    beqz $a4, .LBB34_3
+; LA64F-NEXT:    b .LBB34_6
+; LA64F-NEXT:  .LBB34_5: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB34_1 Depth=1
+; LA64F-NEXT:    dbar 1792
+; LA64F-NEXT:  .LBB34_6: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB34_1 Depth=1
+; LA64F-NEXT:    movgr2fr.w $fa0, $a3
+; LA64F-NEXT:    addi.w $a1, $a2, 0
+; LA64F-NEXT:    bne $a3, $a1, .LBB34_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: float_fmin_monotonic:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    fld.s $fa0, $a0, 0
+; LA64D-NEXT:    addi.w $a1, $zero, 1
+; LA64D-NEXT:    movgr2fr.w $fa1, $a1
+; LA64D-NEXT:    ffint.s.w $fa1, $fa1
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB34_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Loop Header: Depth=1
+; LA64D-NEXT:    # Child Loop BB34_3 Depth 2
+; LA64D-NEXT:    fmax.s $fa2, $fa0, $fa0
+; LA64D-NEXT:    fmin.s $fa2, $fa2, $fa1
+; LA64D-NEXT:    movfr2gr.s $a1, $fa2
+; LA64D-NEXT:    movfr2gr.s $a2, $fa0
+; LA64D-NEXT:  .LBB34_3: # %atomicrmw.start
+; LA64D-NEXT:    # Parent Loop BB34_1 Depth=1
+; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64D-NEXT:    ll.w $a3, $a0, 0
+; LA64D-NEXT:    bne $a3, $a2, .LBB34_5
+; LA64D-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB34_3 Depth=2
+; LA64D-NEXT:    move $a4, $a1
+; LA64D-NEXT:    sc.w $a4, $a0, 0
+; LA64D-NEXT:    beqz $a4, .LBB34_3
+; LA64D-NEXT:    b .LBB34_6
+; LA64D-NEXT:  .LBB34_5: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB34_1 Depth=1
+; LA64D-NEXT:    dbar 1792
+; LA64D-NEXT:  .LBB34_6: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB34_1 Depth=1
+; LA64D-NEXT:    movgr2fr.w $fa0, $a3
+; LA64D-NEXT:    addi.w $a1, $a2, 0
+; LA64D-NEXT:    bne $a3, $a1, .LBB34_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    ret
+  %v = atomicrmw fmin ptr %p, float 1.0 monotonic, align 4
+  ret float %v
+}
+
+define float @float_fmax_monotonic(ptr %p) nounwind {
+; LA64F-LABEL: float_fmax_monotonic:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    fld.s $fa0, $a0, 0
+; LA64F-NEXT:    addi.w $a1, $zero, 1
+; LA64F-NEXT:    movgr2fr.w $fa1, $a1
+; LA64F-NEXT:    ffint.s.w $fa1, $fa1
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB35_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Loop Header: Depth=1
+; LA64F-NEXT:    # Child Loop BB35_3 Depth 2
+; LA64F-NEXT:    fmax.s $fa2, $fa0, $fa0
+; LA64F-NEXT:    fmax.s $fa2, $fa2, $fa1
+; LA64F-NEXT:    movfr2gr.s $a1, $fa2
+; LA64F-NEXT:    movfr2gr.s $a2, $fa0
+; LA64F-NEXT:  .LBB35_3: # %atomicrmw.start
+; LA64F-NEXT:    # Parent Loop BB35_1 Depth=1
+; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64F-NEXT:    ll.w $a3, $a0, 0
+; LA64F-NEXT:    bne $a3, $a2, .LBB35_5
+; LA64F-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB35_3 Depth=2
+; LA64F-NEXT:    move $a4, $a1
+; LA64F-NEXT:    sc.w $a4, $a0, 0
+; LA64F-NEXT:    beqz $a4, .LBB35_3
+; LA64F-NEXT:    b .LBB35_6
+; LA64F-NEXT:  .LBB35_5: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB35_1 Depth=1
+; LA64F-NEXT:    dbar 1792
+; LA64F-NEXT:  .LBB35_6: # %atomicrmw.start
+; LA64F-NEXT:    # in Loop: Header=BB35_1 Depth=1
+; LA64F-NEXT:    movgr2fr.w $fa0, $a3
+; LA64F-NEXT:    addi.w $a1, $a2, 0
+; LA64F-NEXT:    bne $a3, $a1, .LBB35_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: float_fmax_monotonic:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    fld.s $fa0, $a0, 0
+; LA64D-NEXT:    addi.w $a1, $zero, 1
+; LA64D-NEXT:    movgr2fr.w $fa1, $a1
+; LA64D-NEXT:    ffint.s.w $fa1, $fa1
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB35_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Loop Header: Depth=1
+; LA64D-NEXT:    # Child Loop BB35_3 Depth 2
+; LA64D-NEXT:    fmax.s $fa2, $fa0, $fa0
+; LA64D-NEXT:    fmax.s $fa2, $fa2, $fa1
+; LA64D-NEXT:    movfr2gr.s $a1, $fa2
+; LA64D-NEXT:    movfr2gr.s $a2, $fa0
+; LA64D-NEXT:  .LBB35_3: # %atomicrmw.start
+; LA64D-NEXT:    # Parent Loop BB35_1 Depth=1
+; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
+; LA64D-NEXT:    ll.w $a3, $a0, 0
+; LA64D-NEXT:    bne $a3, $a2, .LBB35_5
+; LA64D-NEXT:  # %bb.4: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB35_3 Depth=2
+; LA64D-NEXT:    move $a4, $a1
+; LA64D-NEXT:    sc.w $a4, $a0, 0
+; LA64D-NEXT:    beqz $a4, .LBB35_3
+; LA64D-NEXT:    b .LBB35_6
+; LA64D-NEXT:  .LBB35_5: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB35_1 Depth=1
+; LA64D-NEXT:    dbar 1792
+; LA64D-NEXT:  .LBB35_6: # %atomicrmw.start
+; LA64D-NEXT:    # in Loop: Header=BB35_1 Depth=1
+; LA64D-NEXT:    movgr2fr.w $fa0, $a3
+; LA64D-NEXT:    addi.w $a1, $a2, 0
+; LA64D-NEXT:    bne $a3, $a1, .LBB35_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    ret
+  %v = atomicrmw fmax ptr %p, float 1.0 monotonic, align 4
+  ret float %v
+}
+
+define double @double_fadd_monotonic(ptr %p) nounwind {
+; LA64F-LABEL: double_fadd_monotonic:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    addi.d $sp, $sp, -64
+; LA64F-NEXT:    st.d $ra, $sp, 56 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 48 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s2, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s3, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    move $fp, $a0
+; LA64F-NEXT:    ld.d $a0, $a0, 0
+; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
+; LA64F-NEXT:    ori $s1, $zero, 8
+; LA64F-NEXT:    addi.d $s2, $sp, 8
+; LA64F-NEXT:    addi.d $s3, $sp, 0
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB36_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    bl %plt(__adddf3)
+; LA64F-NEXT:    st.d $a0, $sp, 0
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    move $a1, $fp
+; LA64F-NEXT:    move $a2, $s2
+; LA64F-NEXT:    move $a3, $s3
+; LA64F-NEXT:    move $a4, $zero
+; LA64F-NEXT:    move $a5, $zero
+; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64F-NEXT:    move $a1, $a0
+; LA64F-NEXT:    ld.d $a0, $sp, 8
+; LA64F-NEXT:    beqz $a1, .LBB36_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ld.d $s3, $sp, 16 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s2, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s1, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 48 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 56 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 64
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: double_fadd_monotonic:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    addi.d $sp, $sp, -64
+; LA64D-NEXT:    st.d $ra, $sp, 56 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 48 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s0, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s1, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s2, $sp, 24 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64D-NEXT:    move $fp, $a0
+; LA64D-NEXT:    fld.d $fa0, $a0, 0
+; LA64D-NEXT:    addi.d $a0, $zero, 1
+; LA64D-NEXT:    movgr2fr.d $fa1, $a0
+; LA64D-NEXT:    ffint.d.l $fs0, $fa1
+; LA64D-NEXT:    ori $s0, $zero, 8
+; LA64D-NEXT:    addi.d $s1, $sp, 8
+; LA64D-NEXT:    addi.d $s2, $sp, 0
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB36_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64D-NEXT:    fst.d $fa0, $sp, 8
+; LA64D-NEXT:    fadd.d $fa0, $fa0, $fs0
+; LA64D-NEXT:    fst.d $fa0, $sp, 0
+; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    move $a1, $fp
+; LA64D-NEXT:    move $a2, $s1
+; LA64D-NEXT:    move $a3, $s2
+; LA64D-NEXT:    move $a4, $zero
+; LA64D-NEXT:    move $a5, $zero
+; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64D-NEXT:    fld.d $fa0, $sp, 8
+; LA64D-NEXT:    beqz $a0, .LBB36_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s2, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s1, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s0, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 48 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 56 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 64
+; LA64D-NEXT:    ret
+  %v = atomicrmw fadd ptr %p, double 1.0 monotonic, align 4
+  ret double %v
+}
+
+define double @double_fsub_monotonic(ptr %p) nounwind {
+; LA64F-LABEL: double_fsub_monotonic:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    addi.d $sp, $sp, -64
+; LA64F-NEXT:    st.d $ra, $sp, 56 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 48 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s2, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s3, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    move $fp, $a0
+; LA64F-NEXT:    ld.d $a0, $a0, 0
+; LA64F-NEXT:    lu52i.d $s0, $zero, -1025
+; LA64F-NEXT:    ori $s1, $zero, 8
+; LA64F-NEXT:    addi.d $s2, $sp, 8
+; LA64F-NEXT:    addi.d $s3, $sp, 0
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB37_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    bl %plt(__adddf3)
+; LA64F-NEXT:    st.d $a0, $sp, 0
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    move $a1, $fp
+; LA64F-NEXT:    move $a2, $s2
+; LA64F-NEXT:    move $a3, $s3
+; LA64F-NEXT:    move $a4, $zero
+; LA64F-NEXT:    move $a5, $zero
+; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64F-NEXT:    move $a1, $a0
+; LA64F-NEXT:    ld.d $a0, $sp, 8
+; LA64F-NEXT:    beqz $a1, .LBB37_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ld.d $s3, $sp, 16 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s2, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s1, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 48 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 56 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 64
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: double_fsub_monotonic:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    addi.d $sp, $sp, -64
+; LA64D-NEXT:    st.d $ra, $sp, 56 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 48 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s0, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s1, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s2, $sp, 24 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64D-NEXT:    move $fp, $a0
+; LA64D-NEXT:    fld.d $fa0, $a0, 0
+; LA64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI37_0)
+; LA64D-NEXT:    addi.d $a0, $a0, %pc_lo12(.LCPI37_0)
+; LA64D-NEXT:    fld.d $fs0, $a0, 0
+; LA64D-NEXT:    ori $s0, $zero, 8
+; LA64D-NEXT:    addi.d $s1, $sp, 8
+; LA64D-NEXT:    addi.d $s2, $sp, 0
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB37_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64D-NEXT:    fst.d $fa0, $sp, 8
+; LA64D-NEXT:    fadd.d $fa0, $fa0, $fs0
+; LA64D-NEXT:    fst.d $fa0, $sp, 0
+; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    move $a1, $fp
+; LA64D-NEXT:    move $a2, $s1
+; LA64D-NEXT:    move $a3, $s2
+; LA64D-NEXT:    move $a4, $zero
+; LA64D-NEXT:    move $a5, $zero
+; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64D-NEXT:    fld.d $fa0, $sp, 8
+; LA64D-NEXT:    beqz $a0, .LBB37_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s2, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s1, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s0, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 48 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 56 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 64
+; LA64D-NEXT:    ret
+  %v = atomicrmw fsub ptr %p, double 1.0 monotonic, align 4
+  ret double %v
+}
+
+define double @double_fmin_monotonic(ptr %p) nounwind {
+; LA64F-LABEL: double_fmin_monotonic:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    addi.d $sp, $sp, -64
+; LA64F-NEXT:    st.d $ra, $sp, 56 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 48 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s2, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s3, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    move $fp, $a0
+; LA64F-NEXT:    ld.d $a0, $a0, 0
+; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
+; LA64F-NEXT:    ori $s1, $zero, 8
+; LA64F-NEXT:    addi.d $s2, $sp, 8
+; LA64F-NEXT:    addi.d $s3, $sp, 0
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB38_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    bl %plt(fmin)
+; LA64F-NEXT:    st.d $a0, $sp, 0
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    move $a1, $fp
+; LA64F-NEXT:    move $a2, $s2
+; LA64F-NEXT:    move $a3, $s3
+; LA64F-NEXT:    move $a4, $zero
+; LA64F-NEXT:    move $a5, $zero
+; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64F-NEXT:    move $a1, $a0
+; LA64F-NEXT:    ld.d $a0, $sp, 8
+; LA64F-NEXT:    beqz $a1, .LBB38_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ld.d $s3, $sp, 16 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s2, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s1, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 48 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 56 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 64
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: double_fmin_monotonic:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    addi.d $sp, $sp, -64
+; LA64D-NEXT:    st.d $ra, $sp, 56 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 48 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s0, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s1, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s2, $sp, 24 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64D-NEXT:    move $fp, $a0
+; LA64D-NEXT:    fld.d $fa0, $a0, 0
+; LA64D-NEXT:    addi.d $a0, $zero, 1
+; LA64D-NEXT:    movgr2fr.d $fa1, $a0
+; LA64D-NEXT:    ffint.d.l $fs0, $fa1
+; LA64D-NEXT:    ori $s0, $zero, 8
+; LA64D-NEXT:    addi.d $s1, $sp, 8
+; LA64D-NEXT:    addi.d $s2, $sp, 0
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB38_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64D-NEXT:    fst.d $fa0, $sp, 8
+; LA64D-NEXT:    fmax.d $fa0, $fa0, $fa0
+; LA64D-NEXT:    fmin.d $fa0, $fa0, $fs0
+; LA64D-NEXT:    fst.d $fa0, $sp, 0
+; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    move $a1, $fp
+; LA64D-NEXT:    move $a2, $s1
+; LA64D-NEXT:    move $a3, $s2
+; LA64D-NEXT:    move $a4, $zero
+; LA64D-NEXT:    move $a5, $zero
+; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64D-NEXT:    fld.d $fa0, $sp, 8
+; LA64D-NEXT:    beqz $a0, .LBB38_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s2, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s1, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s0, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 48 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 56 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 64
+; LA64D-NEXT:    ret
+  %v = atomicrmw fmin ptr %p, double 1.0 monotonic, align 4
+  ret double %v
+}
+
+define double @double_fmax_monotonic(ptr %p) nounwind {
+; LA64F-LABEL: double_fmax_monotonic:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    addi.d $sp, $sp, -64
+; LA64F-NEXT:    st.d $ra, $sp, 56 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $fp, $sp, 48 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s0, $sp, 40 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s1, $sp, 32 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s2, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT:    st.d $s3, $sp, 16 # 8-byte Folded Spill
+; LA64F-NEXT:    move $fp, $a0
+; LA64F-NEXT:    ld.d $a0, $a0, 0
+; LA64F-NEXT:    lu52i.d $s0, $zero, 1023
+; LA64F-NEXT:    ori $s1, $zero, 8
+; LA64F-NEXT:    addi.d $s2, $sp, 8
+; LA64F-NEXT:    addi.d $s3, $sp, 0
+; LA64F-NEXT:    .p2align 4, , 16
+; LA64F-NEXT:  .LBB39_1: # %atomicrmw.start
+; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64F-NEXT:    st.d $a0, $sp, 8
+; LA64F-NEXT:    move $a1, $s0
+; LA64F-NEXT:    bl %plt(fmax)
+; LA64F-NEXT:    st.d $a0, $sp, 0
+; LA64F-NEXT:    move $a0, $s1
+; LA64F-NEXT:    move $a1, $fp
+; LA64F-NEXT:    move $a2, $s2
+; LA64F-NEXT:    move $a3, $s3
+; LA64F-NEXT:    move $a4, $zero
+; LA64F-NEXT:    move $a5, $zero
+; LA64F-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64F-NEXT:    move $a1, $a0
+; LA64F-NEXT:    ld.d $a0, $sp, 8
+; LA64F-NEXT:    beqz $a1, .LBB39_1
+; LA64F-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64F-NEXT:    ld.d $s3, $sp, 16 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s2, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s1, $sp, 32 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $s0, $sp, 40 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $fp, $sp, 48 # 8-byte Folded Reload
+; LA64F-NEXT:    ld.d $ra, $sp, 56 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 64
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: double_fmax_monotonic:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    addi.d $sp, $sp, -64
+; LA64D-NEXT:    st.d $ra, $sp, 56 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $fp, $sp, 48 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s0, $sp, 40 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s1, $sp, 32 # 8-byte Folded Spill
+; LA64D-NEXT:    st.d $s2, $sp, 24 # 8-byte Folded Spill
+; LA64D-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64D-NEXT:    move $fp, $a0
+; LA64D-NEXT:    fld.d $fa0, $a0, 0
+; LA64D-NEXT:    addi.d $a0, $zero, 1
+; LA64D-NEXT:    movgr2fr.d $fa1, $a0
+; LA64D-NEXT:    ffint.d.l $fs0, $fa1
+; LA64D-NEXT:    ori $s0, $zero, 8
+; LA64D-NEXT:    addi.d $s1, $sp, 8
+; LA64D-NEXT:    addi.d $s2, $sp, 0
+; LA64D-NEXT:    .p2align 4, , 16
+; LA64D-NEXT:  .LBB39_1: # %atomicrmw.start
+; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64D-NEXT:    fst.d $fa0, $sp, 8
+; LA64D-NEXT:    fmax.d $fa0, $fa0, $fa0
+; LA64D-NEXT:    fmax.d $fa0, $fa0, $fs0
+; LA64D-NEXT:    fst.d $fa0, $sp, 0
+; LA64D-NEXT:    move $a0, $s0
+; LA64D-NEXT:    move $a1, $fp
+; LA64D-NEXT:    move $a2, $s1
+; LA64D-NEXT:    move $a3, $s2
+; LA64D-NEXT:    move $a4, $zero
+; LA64D-NEXT:    move $a5, $zero
+; LA64D-NEXT:    bl %plt(__atomic_compare_exchange)
+; LA64D-NEXT:    fld.d $fa0, $sp, 8
+; LA64D-NEXT:    beqz $a0, .LBB39_1
+; LA64D-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64D-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s2, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s1, $sp, 32 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $s0, $sp, 40 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $fp, $sp, 48 # 8-byte Folded Reload
+; LA64D-NEXT:    ld.d $ra, $sp, 56 # 8-byte Folded Reload
+; LA64D-NEXT:    addi.d $sp, $sp, 64
+; LA64D-NEXT:    ret
+  %v = atomicrmw fmax ptr %p, double 1.0 monotonic, align 4
+  ret double %v
+}

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-minmax.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-minmax.ll
index 27b8cc17fd64f23..464c9ce97c5a6ef 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-minmax.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-minmax.ll
@@ -345,3 +345,1371 @@ define i64 @atomicrmw_min_i64_acquire(ptr %a, i64 %b) nounwind {
   %1 = atomicrmw min ptr %a, i64 %b acquire
   ret i64 %1
 }
+
+define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i8_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    addi.w $a3, $a3, 0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB16_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a3
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a6, $a1, .LBB16_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB16_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB16_3: # in Loop: Header=BB16_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB16_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw umax ptr %a, i8 %b release
+  ret i8 %1
+}
+
+define i16 @atomicrmw_umax_i16_release(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i16_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    slli.d $a3, $a0, 3
+; LA64-NEXT:    sll.w $a2, $a2, $a3
+; LA64-NEXT:    addi.w $a2, $a2, 0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a3
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB17_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a2
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a6, $a1, .LBB17_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB17_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a2
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB17_3: # in Loop: Header=BB17_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB17_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a3
+; LA64-NEXT:    ret
+  %1 = atomicrmw umax ptr %a, i16 %b release
+  ret i16 %1
+}
+
+define i32 @atomicrmw_umax_i32_release(ptr %a, i32 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i32_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammax_db.wu $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw umax ptr %a, i32 %b release
+  ret i32 %1
+}
+
+define i64 @atomicrmw_umax_i64_release(ptr %a, i64 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i64_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammax_db.du $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw umax ptr %a, i64 %b release
+  ret i64 %1
+}
+
+define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i8_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    addi.w $a3, $a3, 0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB20_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a3
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a1, $a6, .LBB20_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB20_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB20_3: # in Loop: Header=BB20_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB20_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw umin ptr %a, i8 %b release
+  ret i8 %1
+}
+
+define i16 @atomicrmw_umin_i16_release(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i16_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    slli.d $a3, $a0, 3
+; LA64-NEXT:    sll.w $a2, $a2, $a3
+; LA64-NEXT:    addi.w $a2, $a2, 0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a3
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB21_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a2
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a1, $a6, .LBB21_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB21_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a2
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB21_3: # in Loop: Header=BB21_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB21_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a3
+; LA64-NEXT:    ret
+  %1 = atomicrmw umin ptr %a, i16 %b release
+  ret i16 %1
+}
+
+define i32 @atomicrmw_umin_i32_release(ptr %a, i32 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i32_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammin_db.wu $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw umin ptr %a, i32 %b release
+  ret i32 %1
+}
+
+define i64 @atomicrmw_umin_i64_release(ptr %a, i64 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i64_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammin_db.du $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw umin ptr %a, i64 %b release
+  ret i64 %1
+}
+
+define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i8_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    addi.w $a3, $a3, 0
+; LA64-NEXT:    ext.w.b $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a4, $a2, 24
+; LA64-NEXT:    xori $a4, $a4, 56
+; LA64-NEXT:  .LBB24_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a3
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a4
+; LA64-NEXT:    sra.w $a7, $a7, $a4
+; LA64-NEXT:    bge $a7, $a1, .LBB24_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB24_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a3
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB24_3: # in Loop: Header=BB24_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB24_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw max ptr %a, i8 %b release
+  ret i8 %1
+}
+
+define i16 @atomicrmw_max_i16_release(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i16_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    andi $a3, $a2, 24
+; LA64-NEXT:    ori $a4, $zero, 48
+; LA64-NEXT:    sub.d $a3, $a4, $a3
+; LA64-NEXT:    lu12i.w $a4, 15
+; LA64-NEXT:    ori $a4, $a4, 4095
+; LA64-NEXT:    sll.w $a4, $a4, $a2
+; LA64-NEXT:    addi.w $a4, $a4, 0
+; LA64-NEXT:    ext.w.h $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB25_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a4
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a3
+; LA64-NEXT:    sra.w $a7, $a7, $a3
+; LA64-NEXT:    bge $a7, $a1, .LBB25_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB25_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a4
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB25_3: # in Loop: Header=BB25_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB25_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw max ptr %a, i16 %b release
+  ret i16 %1
+}
+
+define i32 @atomicrmw_max_i32_release(ptr %a, i32 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i32_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammax_db.w $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw max ptr %a, i32 %b release
+  ret i32 %1
+}
+
+define i64 @atomicrmw_max_i64_release(ptr %a, i64 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i64_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammax_db.d $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw max ptr %a, i64 %b release
+  ret i64 %1
+}
+
+define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i8_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    addi.w $a3, $a3, 0
+; LA64-NEXT:    ext.w.b $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a4, $a2, 24
+; LA64-NEXT:    xori $a4, $a4, 56
+; LA64-NEXT:  .LBB28_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a3
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a4
+; LA64-NEXT:    sra.w $a7, $a7, $a4
+; LA64-NEXT:    bge $a1, $a7, .LBB28_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB28_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a3
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB28_3: # in Loop: Header=BB28_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB28_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw min ptr %a, i8 %b release
+  ret i8 %1
+}
+
+define i16 @atomicrmw_min_i16_release(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i16_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    andi $a3, $a2, 24
+; LA64-NEXT:    ori $a4, $zero, 48
+; LA64-NEXT:    sub.d $a3, $a4, $a3
+; LA64-NEXT:    lu12i.w $a4, 15
+; LA64-NEXT:    ori $a4, $a4, 4095
+; LA64-NEXT:    sll.w $a4, $a4, $a2
+; LA64-NEXT:    addi.w $a4, $a4, 0
+; LA64-NEXT:    ext.w.h $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB29_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a4
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a3
+; LA64-NEXT:    sra.w $a7, $a7, $a3
+; LA64-NEXT:    bge $a1, $a7, .LBB29_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB29_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a4
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB29_3: # in Loop: Header=BB29_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB29_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw min ptr %a, i16 %b release
+  ret i16 %1
+}
+
+define i32 @atomicrmw_min_i32_release(ptr %a, i32 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i32_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammin_db.w $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw min ptr %a, i32 %b release
+  ret i32 %1
+}
+
+define i64 @atomicrmw_min_i64_release(ptr %a, i64 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i64_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammin_db.d $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw min ptr %a, i64 %b release
+  ret i64 %1
+}
+
+define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i8_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    addi.w $a3, $a3, 0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB32_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a3
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a6, $a1, .LBB32_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB32_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB32_3: # in Loop: Header=BB32_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB32_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw umax ptr %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+define i16 @atomicrmw_umax_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i16_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    slli.d $a3, $a0, 3
+; LA64-NEXT:    sll.w $a2, $a2, $a3
+; LA64-NEXT:    addi.w $a2, $a2, 0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a3
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB33_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a2
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a6, $a1, .LBB33_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB33_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a2
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB33_3: # in Loop: Header=BB33_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB33_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a3
+; LA64-NEXT:    ret
+  %1 = atomicrmw umax ptr %a, i16 %b acq_rel
+  ret i16 %1
+}
+
+define i32 @atomicrmw_umax_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i32_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammax_db.wu $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw umax ptr %a, i32 %b acq_rel
+  ret i32 %1
+}
+
+define i64 @atomicrmw_umax_i64_acq_rel(ptr %a, i64 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i64_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammax_db.du $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw umax ptr %a, i64 %b acq_rel
+  ret i64 %1
+}
+
+define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i8_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    addi.w $a3, $a3, 0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB36_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a3
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a1, $a6, .LBB36_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB36_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB36_3: # in Loop: Header=BB36_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB36_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw umin ptr %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+define i16 @atomicrmw_umin_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i16_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    slli.d $a3, $a0, 3
+; LA64-NEXT:    sll.w $a2, $a2, $a3
+; LA64-NEXT:    addi.w $a2, $a2, 0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a3
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB37_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a2
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a1, $a6, .LBB37_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB37_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a2
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB37_3: # in Loop: Header=BB37_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB37_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a3
+; LA64-NEXT:    ret
+  %1 = atomicrmw umin ptr %a, i16 %b acq_rel
+  ret i16 %1
+}
+
+define i32 @atomicrmw_umin_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i32_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammin_db.wu $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw umin ptr %a, i32 %b acq_rel
+  ret i32 %1
+}
+
+define i64 @atomicrmw_umin_i64_acq_rel(ptr %a, i64 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i64_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammin_db.du $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw umin ptr %a, i64 %b acq_rel
+  ret i64 %1
+}
+
+define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i8_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    addi.w $a3, $a3, 0
+; LA64-NEXT:    ext.w.b $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a4, $a2, 24
+; LA64-NEXT:    xori $a4, $a4, 56
+; LA64-NEXT:  .LBB40_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a3
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a4
+; LA64-NEXT:    sra.w $a7, $a7, $a4
+; LA64-NEXT:    bge $a7, $a1, .LBB40_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB40_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a3
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB40_3: # in Loop: Header=BB40_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB40_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw max ptr %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+define i16 @atomicrmw_max_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i16_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    andi $a3, $a2, 24
+; LA64-NEXT:    ori $a4, $zero, 48
+; LA64-NEXT:    sub.d $a3, $a4, $a3
+; LA64-NEXT:    lu12i.w $a4, 15
+; LA64-NEXT:    ori $a4, $a4, 4095
+; LA64-NEXT:    sll.w $a4, $a4, $a2
+; LA64-NEXT:    addi.w $a4, $a4, 0
+; LA64-NEXT:    ext.w.h $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB41_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a4
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a3
+; LA64-NEXT:    sra.w $a7, $a7, $a3
+; LA64-NEXT:    bge $a7, $a1, .LBB41_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB41_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a4
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB41_3: # in Loop: Header=BB41_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB41_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw max ptr %a, i16 %b acq_rel
+  ret i16 %1
+}
+
+define i32 @atomicrmw_max_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i32_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammax_db.w $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw max ptr %a, i32 %b acq_rel
+  ret i32 %1
+}
+
+define i64 @atomicrmw_max_i64_acq_rel(ptr %a, i64 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i64_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammax_db.d $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw max ptr %a, i64 %b acq_rel
+  ret i64 %1
+}
+
+define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i8_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    addi.w $a3, $a3, 0
+; LA64-NEXT:    ext.w.b $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a4, $a2, 24
+; LA64-NEXT:    xori $a4, $a4, 56
+; LA64-NEXT:  .LBB44_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a3
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a4
+; LA64-NEXT:    sra.w $a7, $a7, $a4
+; LA64-NEXT:    bge $a1, $a7, .LBB44_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB44_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a3
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB44_3: # in Loop: Header=BB44_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB44_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw min ptr %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+define i16 @atomicrmw_min_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i16_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    andi $a3, $a2, 24
+; LA64-NEXT:    ori $a4, $zero, 48
+; LA64-NEXT:    sub.d $a3, $a4, $a3
+; LA64-NEXT:    lu12i.w $a4, 15
+; LA64-NEXT:    ori $a4, $a4, 4095
+; LA64-NEXT:    sll.w $a4, $a4, $a2
+; LA64-NEXT:    addi.w $a4, $a4, 0
+; LA64-NEXT:    ext.w.h $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB45_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a4
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a3
+; LA64-NEXT:    sra.w $a7, $a7, $a3
+; LA64-NEXT:    bge $a1, $a7, .LBB45_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB45_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a4
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB45_3: # in Loop: Header=BB45_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB45_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw min ptr %a, i16 %b acq_rel
+  ret i16 %1
+}
+
+define i32 @atomicrmw_min_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i32_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammin_db.w $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw min ptr %a, i32 %b acq_rel
+  ret i32 %1
+}
+
+define i64 @atomicrmw_min_i64_acq_rel(ptr %a, i64 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i64_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammin_db.d $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw min ptr %a, i64 %b acq_rel
+  ret i64 %1
+}
+
+define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i8_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    addi.w $a3, $a3, 0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB48_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a3
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a6, $a1, .LBB48_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB48_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB48_3: # in Loop: Header=BB48_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB48_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw umax ptr %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i16_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    slli.d $a3, $a0, 3
+; LA64-NEXT:    sll.w $a2, $a2, $a3
+; LA64-NEXT:    addi.w $a2, $a2, 0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a3
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB49_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a2
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a6, $a1, .LBB49_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB49_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a2
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB49_3: # in Loop: Header=BB49_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB49_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a3
+; LA64-NEXT:    ret
+  %1 = atomicrmw umax ptr %a, i16 %b seq_cst
+  ret i16 %1
+}
+
+define i32 @atomicrmw_umax_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i32_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammax_db.wu $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw umax ptr %a, i32 %b seq_cst
+  ret i32 %1
+}
+
+define i64 @atomicrmw_umax_i64_seq_cst(ptr %a, i64 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i64_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammax_db.du $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw umax ptr %a, i64 %b seq_cst
+  ret i64 %1
+}
+
+define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i8_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    addi.w $a3, $a3, 0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB52_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a3
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a1, $a6, .LBB52_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB52_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB52_3: # in Loop: Header=BB52_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB52_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw umin ptr %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i16_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    slli.d $a3, $a0, 3
+; LA64-NEXT:    sll.w $a2, $a2, $a3
+; LA64-NEXT:    addi.w $a2, $a2, 0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a3
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB53_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a2
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a1, $a6, .LBB53_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB53_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a2
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB53_3: # in Loop: Header=BB53_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB53_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a3
+; LA64-NEXT:    ret
+  %1 = atomicrmw umin ptr %a, i16 %b seq_cst
+  ret i16 %1
+}
+
+define i32 @atomicrmw_umin_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i32_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammin_db.wu $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw umin ptr %a, i32 %b seq_cst
+  ret i32 %1
+}
+
+define i64 @atomicrmw_umin_i64_seq_cst(ptr %a, i64 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i64_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammin_db.du $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw umin ptr %a, i64 %b seq_cst
+  ret i64 %1
+}
+
+define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i8_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    addi.w $a3, $a3, 0
+; LA64-NEXT:    ext.w.b $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a4, $a2, 24
+; LA64-NEXT:    xori $a4, $a4, 56
+; LA64-NEXT:  .LBB56_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a3
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a4
+; LA64-NEXT:    sra.w $a7, $a7, $a4
+; LA64-NEXT:    bge $a7, $a1, .LBB56_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB56_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a3
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB56_3: # in Loop: Header=BB56_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB56_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw max ptr %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i16_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    andi $a3, $a2, 24
+; LA64-NEXT:    ori $a4, $zero, 48
+; LA64-NEXT:    sub.d $a3, $a4, $a3
+; LA64-NEXT:    lu12i.w $a4, 15
+; LA64-NEXT:    ori $a4, $a4, 4095
+; LA64-NEXT:    sll.w $a4, $a4, $a2
+; LA64-NEXT:    addi.w $a4, $a4, 0
+; LA64-NEXT:    ext.w.h $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB57_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a4
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a3
+; LA64-NEXT:    sra.w $a7, $a7, $a3
+; LA64-NEXT:    bge $a7, $a1, .LBB57_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB57_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a4
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB57_3: # in Loop: Header=BB57_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB57_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw max ptr %a, i16 %b seq_cst
+  ret i16 %1
+}
+
+define i32 @atomicrmw_max_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i32_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammax_db.w $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw max ptr %a, i32 %b seq_cst
+  ret i32 %1
+}
+
+define i64 @atomicrmw_max_i64_seq_cst(ptr %a, i64 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i64_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammax_db.d $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw max ptr %a, i64 %b seq_cst
+  ret i64 %1
+}
+
+define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i8_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    addi.w $a3, $a3, 0
+; LA64-NEXT:    ext.w.b $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a4, $a2, 24
+; LA64-NEXT:    xori $a4, $a4, 56
+; LA64-NEXT:  .LBB60_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a3
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a4
+; LA64-NEXT:    sra.w $a7, $a7, $a4
+; LA64-NEXT:    bge $a1, $a7, .LBB60_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB60_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a3
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB60_3: # in Loop: Header=BB60_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB60_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw min ptr %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i16_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    andi $a3, $a2, 24
+; LA64-NEXT:    ori $a4, $zero, 48
+; LA64-NEXT:    sub.d $a3, $a4, $a3
+; LA64-NEXT:    lu12i.w $a4, 15
+; LA64-NEXT:    ori $a4, $a4, 4095
+; LA64-NEXT:    sll.w $a4, $a4, $a2
+; LA64-NEXT:    addi.w $a4, $a4, 0
+; LA64-NEXT:    ext.w.h $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB61_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a4
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a3
+; LA64-NEXT:    sra.w $a7, $a7, $a3
+; LA64-NEXT:    bge $a1, $a7, .LBB61_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB61_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a4
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB61_3: # in Loop: Header=BB61_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB61_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw min ptr %a, i16 %b seq_cst
+  ret i16 %1
+}
+
+define i32 @atomicrmw_min_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i32_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammin_db.w $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw min ptr %a, i32 %b seq_cst
+  ret i32 %1
+}
+
+define i64 @atomicrmw_min_i64_seq_cst(ptr %a, i64 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i64_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammin_db.d $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw min ptr %a, i64 %b seq_cst
+  ret i64 %1
+}
+
+define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i8_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    addi.w $a3, $a3, 0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB64_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a3
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a6, $a1, .LBB64_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB64_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB64_3: # in Loop: Header=BB64_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB64_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw umax ptr %a, i8 %b monotonic
+  ret i8 %1
+}
+
+define i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i16_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    slli.d $a3, $a0, 3
+; LA64-NEXT:    sll.w $a2, $a2, $a3
+; LA64-NEXT:    addi.w $a2, $a2, 0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a3
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB65_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a2
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a6, $a1, .LBB65_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB65_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a2
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB65_3: # in Loop: Header=BB65_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB65_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a3
+; LA64-NEXT:    ret
+  %1 = atomicrmw umax ptr %a, i16 %b monotonic
+  ret i16 %1
+}
+
+define i32 @atomicrmw_umax_i32_monotonic(ptr %a, i32 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i32_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammax_db.wu $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw umax ptr %a, i32 %b monotonic
+  ret i32 %1
+}
+
+define i64 @atomicrmw_umax_i64_monotonic(ptr %a, i64 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i64_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammax_db.du $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw umax ptr %a, i64 %b monotonic
+  ret i64 %1
+}
+
+define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i8_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    addi.w $a3, $a3, 0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB68_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a3
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a1, $a6, .LBB68_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB68_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB68_3: # in Loop: Header=BB68_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB68_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw umin ptr %a, i8 %b monotonic
+  ret i8 %1
+}
+
+define i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i16_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    slli.d $a3, $a0, 3
+; LA64-NEXT:    sll.w $a2, $a2, $a3
+; LA64-NEXT:    addi.w $a2, $a2, 0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a3
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB69_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a2
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a1, $a6, .LBB69_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB69_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a2
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB69_3: # in Loop: Header=BB69_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB69_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a3
+; LA64-NEXT:    ret
+  %1 = atomicrmw umin ptr %a, i16 %b monotonic
+  ret i16 %1
+}
+
+define i32 @atomicrmw_umin_i32_monotonic(ptr %a, i32 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i32_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammin_db.wu $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw umin ptr %a, i32 %b monotonic
+  ret i32 %1
+}
+
+define i64 @atomicrmw_umin_i64_monotonic(ptr %a, i64 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i64_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammin_db.du $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw umin ptr %a, i64 %b monotonic
+  ret i64 %1
+}
+
+define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i8_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    addi.w $a3, $a3, 0
+; LA64-NEXT:    ext.w.b $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a4, $a2, 24
+; LA64-NEXT:    xori $a4, $a4, 56
+; LA64-NEXT:  .LBB72_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a3
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a4
+; LA64-NEXT:    sra.w $a7, $a7, $a4
+; LA64-NEXT:    bge $a7, $a1, .LBB72_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB72_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a3
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB72_3: # in Loop: Header=BB72_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB72_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw max ptr %a, i8 %b monotonic
+  ret i8 %1
+}
+
+define i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i16_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    andi $a3, $a2, 24
+; LA64-NEXT:    ori $a4, $zero, 48
+; LA64-NEXT:    sub.d $a3, $a4, $a3
+; LA64-NEXT:    lu12i.w $a4, 15
+; LA64-NEXT:    ori $a4, $a4, 4095
+; LA64-NEXT:    sll.w $a4, $a4, $a2
+; LA64-NEXT:    addi.w $a4, $a4, 0
+; LA64-NEXT:    ext.w.h $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB73_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a4
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a3
+; LA64-NEXT:    sra.w $a7, $a7, $a3
+; LA64-NEXT:    bge $a7, $a1, .LBB73_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB73_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a4
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB73_3: # in Loop: Header=BB73_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB73_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw max ptr %a, i16 %b monotonic
+  ret i16 %1
+}
+
+define i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i32_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammax_db.w $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw max ptr %a, i32 %b monotonic
+  ret i32 %1
+}
+
+define i64 @atomicrmw_max_i64_monotonic(ptr %a, i64 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i64_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammax_db.d $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw max ptr %a, i64 %b monotonic
+  ret i64 %1
+}
+
+define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i8_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    addi.w $a3, $a3, 0
+; LA64-NEXT:    ext.w.b $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a4, $a2, 24
+; LA64-NEXT:    xori $a4, $a4, 56
+; LA64-NEXT:  .LBB76_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a3
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a4
+; LA64-NEXT:    sra.w $a7, $a7, $a4
+; LA64-NEXT:    bge $a1, $a7, .LBB76_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB76_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a3
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB76_3: # in Loop: Header=BB76_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB76_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw min ptr %a, i8 %b monotonic
+  ret i8 %1
+}
+
+define i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i16_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    andi $a3, $a2, 24
+; LA64-NEXT:    ori $a4, $zero, 48
+; LA64-NEXT:    sub.d $a3, $a4, $a3
+; LA64-NEXT:    lu12i.w $a4, 15
+; LA64-NEXT:    ori $a4, $a4, 4095
+; LA64-NEXT:    sll.w $a4, $a4, $a2
+; LA64-NEXT:    addi.w $a4, $a4, 0
+; LA64-NEXT:    ext.w.h $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB77_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a4
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a3
+; LA64-NEXT:    sra.w $a7, $a7, $a3
+; LA64-NEXT:    bge $a1, $a7, .LBB77_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB77_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a4
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB77_3: # in Loop: Header=BB77_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB77_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw min ptr %a, i16 %b monotonic
+  ret i16 %1
+}
+
+define i32 @atomicrmw_min_i32_monotonic(ptr %a, i32 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i32_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammin_db.w $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw min ptr %a, i32 %b monotonic
+  ret i32 %1
+}
+
+define i64 @atomicrmw_min_i64_monotonic(ptr %a, i64 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i64_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ammin_db.d $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw min ptr %a, i64 %b monotonic
+  ret i64 %1
+}

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw.ll
index 86770c3a2625754..d4f7ed017121da6 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw.ll
@@ -996,6 +996,2988 @@ define i64 @atomicrmw_xor_i64_acquire(ptr %a, i64 %b) nounwind {
   ret i64 %1
 }
 
+define i8 @atomicrmw_xchg_i8_release(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_xchg_i8_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    ori $a3, $zero, 255
+; LA32-NEXT:    sll.w $a3, $a3, $a2
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB32_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a0, 0
+; LA32-NEXT:    addi.w $a5, $a1, 0
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    and $a5, $a5, $a3
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    sc.w $a5, $a0, 0
+; LA32-NEXT:    beqz $a5, .LBB32_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a4, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xchg_i8_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    addi.w $a3, $a3, 0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB32_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    addi.w $a5, $a1, 0
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB32_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i8 %b release
+  ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_0_i8_release(ptr %a) nounwind {
+; LA32-LABEL: atomicrmw_xchg_0_i8_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a1, $a0, 3
+; LA32-NEXT:    ori $a2, $zero, 255
+; LA32-NEXT:    sll.w $a2, $a2, $a1
+; LA32-NEXT:    nor $a2, $a2, $zero
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB33_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a0, 0
+; LA32-NEXT:    and $a4, $a3, $a2
+; LA32-NEXT:    sc.w $a4, $a0, 0
+; LA32-NEXT:    beqz $a4, .LBB33_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a3, $a1
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xchg_0_i8_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a0, 3
+; LA64-NEXT:    ori $a2, $zero, 255
+; LA64-NEXT:    sll.w $a2, $a2, $a1
+; LA64-NEXT:    nor $a2, $a2, $zero
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    amand_db.w $a3, $a2, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a1
+; LA64-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i8 0 release
+  ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_minus_1_i8_release(ptr %a) nounwind {
+; LA32-LABEL: atomicrmw_xchg_minus_1_i8_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a1, $a0, 3
+; LA32-NEXT:    ori $a2, $zero, 255
+; LA32-NEXT:    sll.w $a2, $a2, $a1
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB34_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a0, 0
+; LA32-NEXT:    or $a4, $a3, $a2
+; LA32-NEXT:    sc.w $a4, $a0, 0
+; LA32-NEXT:    beqz $a4, .LBB34_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a3, $a1
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xchg_minus_1_i8_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a0, 3
+; LA64-NEXT:    ori $a2, $zero, 255
+; LA64-NEXT:    sll.w $a2, $a2, $a1
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    amor_db.w $a3, $a2, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a1
+; LA64-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i8 -1 release
+  ret i8 %1
+}
+
+define i16 @atomicrmw_xchg_i16_release(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_xchg_i16_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    lu12i.w $a2, 15
+; LA32-NEXT:    ori $a2, $a2, 4095
+; LA32-NEXT:    slli.w $a3, $a0, 3
+; LA32-NEXT:    sll.w $a2, $a2, $a3
+; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
+; LA32-NEXT:    sll.w $a1, $a1, $a3
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB35_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a0, 0
+; LA32-NEXT:    addi.w $a5, $a1, 0
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    and $a5, $a5, $a2
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    sc.w $a5, $a0, 0
+; LA32-NEXT:    beqz $a5, .LBB35_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a4, $a3
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xchg_i16_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    slli.d $a3, $a0, 3
+; LA64-NEXT:    sll.w $a2, $a2, $a3
+; LA64-NEXT:    addi.w $a2, $a2, 0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a3
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB35_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    addi.w $a5, $a1, 0
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a2
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB35_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a3
+; LA64-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i16 %b release
+  ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_0_i16_release(ptr %a) nounwind {
+; LA32-LABEL: atomicrmw_xchg_0_i16_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    lu12i.w $a1, 15
+; LA32-NEXT:    ori $a1, $a1, 4095
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    nor $a1, $a1, $zero
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB36_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a0, 0
+; LA32-NEXT:    and $a4, $a3, $a1
+; LA32-NEXT:    sc.w $a4, $a0, 0
+; LA32-NEXT:    beqz $a4, .LBB36_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a3, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xchg_0_i16_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a1, 15
+; LA64-NEXT:    ori $a1, $a1, 4095
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    nor $a1, $a1, $zero
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    amand_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i16 0 release
+  ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_minus_1_i16_release(ptr %a) nounwind {
+; LA32-LABEL: atomicrmw_xchg_minus_1_i16_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    lu12i.w $a1, 15
+; LA32-NEXT:    ori $a1, $a1, 4095
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB37_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a0, 0
+; LA32-NEXT:    or $a4, $a3, $a1
+; LA32-NEXT:    sc.w $a4, $a0, 0
+; LA32-NEXT:    beqz $a4, .LBB37_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a3, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xchg_minus_1_i16_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a1, 15
+; LA64-NEXT:    ori $a1, $a1, 4095
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i16 -1 release
+  ret i16 %1
+}
+
+define i32 @atomicrmw_xchg_i32_release(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_xchg_i32_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB38_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    move $a3, $a1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beqz $a3, .LBB38_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xchg_i32_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amswap_db.w $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i32 %b release
+  ret i32 %1
+}
+
+define i64 @atomicrmw_xchg_i64_release(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_xchg_i64_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a3, $zero, 3
+; LA32-NEXT:    bl %plt(__atomic_exchange_8)
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xchg_i64_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amswap_db.d $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i64 %b release
+  ret i64 %1
+}
+
+define i8 @atomicrmw_add_i8_release(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_add_i8_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    ori $a3, $zero, 255
+; LA32-NEXT:    sll.w $a3, $a3, $a2
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB40_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a0, 0
+; LA32-NEXT:    add.w $a5, $a4, $a1
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    and $a5, $a5, $a3
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    sc.w $a5, $a0, 0
+; LA32-NEXT:    beqz $a5, .LBB40_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a4, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_add_i8_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    addi.w $a3, $a3, 0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB40_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    add.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB40_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw add ptr %a, i8 %b release
+  ret i8 %1
+}
+
+define i16 @atomicrmw_add_i16_release(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_add_i16_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    lu12i.w $a2, 15
+; LA32-NEXT:    ori $a2, $a2, 4095
+; LA32-NEXT:    slli.w $a3, $a0, 3
+; LA32-NEXT:    sll.w $a2, $a2, $a3
+; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
+; LA32-NEXT:    sll.w $a1, $a1, $a3
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB41_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a0, 0
+; LA32-NEXT:    add.w $a5, $a4, $a1
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    and $a5, $a5, $a2
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    sc.w $a5, $a0, 0
+; LA32-NEXT:    beqz $a5, .LBB41_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a4, $a3
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_add_i16_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    slli.d $a3, $a0, 3
+; LA64-NEXT:    sll.w $a2, $a2, $a3
+; LA64-NEXT:    addi.w $a2, $a2, 0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a3
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB41_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    add.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a2
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB41_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a3
+; LA64-NEXT:    ret
+  %1 = atomicrmw add ptr %a, i16 %b release
+  ret i16 %1
+}
+
+define i32 @atomicrmw_add_i32_release(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_add_i32_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB42_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    add.w $a3, $a2, $a1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beqz $a3, .LBB42_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_add_i32_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amadd_db.w $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw add ptr %a, i32 %b release
+  ret i32 %1
+}
+
+define i64 @atomicrmw_add_i64_release(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_add_i64_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a3, $zero, 3
+; LA32-NEXT:    bl %plt(__atomic_fetch_add_8)
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_add_i64_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amadd_db.d $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw add ptr %a, i64 %b release
+  ret i64 %1
+}
+
+define i8 @atomicrmw_sub_i8_release(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_sub_i8_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    ori $a3, $zero, 255
+; LA32-NEXT:    sll.w $a3, $a3, $a2
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB44_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a0, 0
+; LA32-NEXT:    sub.w $a5, $a4, $a1
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    and $a5, $a5, $a3
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    sc.w $a5, $a0, 0
+; LA32-NEXT:    beqz $a5, .LBB44_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a4, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_sub_i8_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    addi.w $a3, $a3, 0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB44_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    sub.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB44_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw sub ptr %a, i8 %b release
+  ret i8 %1
+}
+
+define i16 @atomicrmw_sub_i16_release(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_sub_i16_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    lu12i.w $a2, 15
+; LA32-NEXT:    ori $a2, $a2, 4095
+; LA32-NEXT:    slli.w $a3, $a0, 3
+; LA32-NEXT:    sll.w $a2, $a2, $a3
+; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
+; LA32-NEXT:    sll.w $a1, $a1, $a3
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB45_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a0, 0
+; LA32-NEXT:    sub.w $a5, $a4, $a1
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    and $a5, $a5, $a2
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    sc.w $a5, $a0, 0
+; LA32-NEXT:    beqz $a5, .LBB45_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a4, $a3
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_sub_i16_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    slli.d $a3, $a0, 3
+; LA64-NEXT:    sll.w $a2, $a2, $a3
+; LA64-NEXT:    addi.w $a2, $a2, 0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a3
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB45_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    sub.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a2
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB45_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a3
+; LA64-NEXT:    ret
+  %1 = atomicrmw sub ptr %a, i16 %b release
+  ret i16 %1
+}
+
+define i32 @atomicrmw_sub_i32_release(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_sub_i32_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB46_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    sub.w $a3, $a2, $a1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beqz $a3, .LBB46_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_sub_i32_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sub.w $a2, $zero, $a1
+; LA64-NEXT:    amadd_db.w $a1, $a2, $a0
+; LA64-NEXT:    move $a0, $a1
+; LA64-NEXT:    ret
+  %1 = atomicrmw sub ptr %a, i32 %b release
+  ret i32 %1
+}
+
+define i64 @atomicrmw_sub_i64_release(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_sub_i64_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a3, $zero, 3
+; LA32-NEXT:    bl %plt(__atomic_fetch_sub_8)
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_sub_i64_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sub.d $a2, $zero, $a1
+; LA64-NEXT:    amadd_db.d $a1, $a2, $a0
+; LA64-NEXT:    move $a0, $a1
+; LA64-NEXT:    ret
+  %1 = atomicrmw sub ptr %a, i64 %b release
+  ret i64 %1
+}
+
+define i8 @atomicrmw_nand_i8_release(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_nand_i8_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    ori $a3, $zero, 255
+; LA32-NEXT:    sll.w $a3, $a3, $a2
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB48_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a0, 0
+; LA32-NEXT:    and $a5, $a4, $a1
+; LA32-NEXT:    nor $a5, $a5, $zero
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    and $a5, $a5, $a3
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    sc.w $a5, $a0, 0
+; LA32-NEXT:    beqz $a5, .LBB48_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a4, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_nand_i8_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    addi.w $a3, $a3, 0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB48_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a5, $a4, $a1
+; LA64-NEXT:    nor $a5, $a5, $zero
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB48_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw nand ptr %a, i8 %b release
+  ret i8 %1
+}
+
+define i16 @atomicrmw_nand_i16_release(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_nand_i16_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    lu12i.w $a2, 15
+; LA32-NEXT:    ori $a2, $a2, 4095
+; LA32-NEXT:    slli.w $a3, $a0, 3
+; LA32-NEXT:    sll.w $a2, $a2, $a3
+; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
+; LA32-NEXT:    sll.w $a1, $a1, $a3
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB49_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a0, 0
+; LA32-NEXT:    and $a5, $a4, $a1
+; LA32-NEXT:    nor $a5, $a5, $zero
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    and $a5, $a5, $a2
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    sc.w $a5, $a0, 0
+; LA32-NEXT:    beqz $a5, .LBB49_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a4, $a3
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_nand_i16_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    slli.d $a3, $a0, 3
+; LA64-NEXT:    sll.w $a2, $a2, $a3
+; LA64-NEXT:    addi.w $a2, $a2, 0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a3
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB49_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a5, $a4, $a1
+; LA64-NEXT:    nor $a5, $a5, $zero
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a2
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB49_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a3
+; LA64-NEXT:    ret
+  %1 = atomicrmw nand ptr %a, i16 %b release
+  ret i16 %1
+}
+
+define i32 @atomicrmw_nand_i32_release(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_nand_i32_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB50_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    and $a3, $a2, $a1
+; LA32-NEXT:    nor $a3, $a3, $zero
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beqz $a3, .LBB50_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_nand_i32_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:  .LBB50_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a2, $a0, 0
+; LA64-NEXT:    and $a3, $a2, $a1
+; LA64-NEXT:    nor $a3, $a3, $zero
+; LA64-NEXT:    sc.w $a3, $a0, 0
+; LA64-NEXT:    beqz $a3, .LBB50_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw nand ptr %a, i32 %b release
+  ret i32 %1
+}
+
+define i64 @atomicrmw_nand_i64_release(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_nand_i64_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a3, $zero, 3
+; LA32-NEXT:    bl %plt(__atomic_fetch_nand_8)
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_nand_i64_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:  .LBB51_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.d $a2, $a0, 0
+; LA64-NEXT:    and $a3, $a2, $a1
+; LA64-NEXT:    nor $a3, $a3, $zero
+; LA64-NEXT:    sc.d $a3, $a0, 0
+; LA64-NEXT:    beqz $a3, .LBB51_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw nand ptr %a, i64 %b release
+  ret i64 %1
+}
+
+define i8 @atomicrmw_and_i8_release(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_and_i8_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    ori $a3, $zero, 255
+; LA32-NEXT:    sll.w $a3, $a3, $a2
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    orn $a1, $a1, $a3
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB52_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a0, 0
+; LA32-NEXT:    and $a4, $a3, $a1
+; LA32-NEXT:    sc.w $a4, $a0, 0
+; LA32-NEXT:    beqz $a4, .LBB52_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a3, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_and_i8_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    orn $a1, $a1, $a3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    amand_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw and ptr %a, i8 %b release
+  ret i8 %1
+}
+
+define i16 @atomicrmw_and_i16_release(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_and_i16_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    lu12i.w $a2, 15
+; LA32-NEXT:    ori $a2, $a2, 4095
+; LA32-NEXT:    slli.w $a3, $a0, 3
+; LA32-NEXT:    sll.w $a2, $a2, $a3
+; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
+; LA32-NEXT:    sll.w $a1, $a1, $a3
+; LA32-NEXT:    orn $a1, $a1, $a2
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB53_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    and $a4, $a2, $a1
+; LA32-NEXT:    sc.w $a4, $a0, 0
+; LA32-NEXT:    beqz $a4, .LBB53_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a2, $a3
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_and_i16_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    slli.d $a3, $a0, 3
+; LA64-NEXT:    sll.w $a2, $a2, $a3
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a3
+; LA64-NEXT:    orn $a1, $a1, $a2
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    amand_db.w $a2, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a2, $a3
+; LA64-NEXT:    ret
+  %1 = atomicrmw and ptr %a, i16 %b release
+  ret i16 %1
+}
+
+define i32 @atomicrmw_and_i32_release(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_and_i32_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB54_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    and $a3, $a2, $a1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beqz $a3, .LBB54_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_and_i32_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amand_db.w $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw and ptr %a, i32 %b release
+  ret i32 %1
+}
+
+define i64 @atomicrmw_and_i64_release(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_and_i64_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a3, $zero, 3
+; LA32-NEXT:    bl %plt(__atomic_fetch_and_8)
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_and_i64_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amand_db.d $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw and ptr %a, i64 %b release
+  ret i64 %1
+}
+
+define i8 @atomicrmw_or_i8_release(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_or_i8_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB56_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a0, 0
+; LA32-NEXT:    or $a4, $a3, $a1
+; LA32-NEXT:    sc.w $a4, $a0, 0
+; LA32-NEXT:    beqz $a4, .LBB56_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a3, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_or_i8_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw or ptr %a, i8 %b release
+  ret i8 %1
+}
+
+define i16 @atomicrmw_or_i16_release(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_or_i16_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB57_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a0, 0
+; LA32-NEXT:    or $a4, $a3, $a1
+; LA32-NEXT:    sc.w $a4, $a0, 0
+; LA32-NEXT:    beqz $a4, .LBB57_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a3, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_or_i16_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw or ptr %a, i16 %b release
+  ret i16 %1
+}
+
+define i32 @atomicrmw_or_i32_release(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_or_i32_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB58_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    or $a3, $a2, $a1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beqz $a3, .LBB58_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_or_i32_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amor_db.w $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw or ptr %a, i32 %b release
+  ret i32 %1
+}
+
+define i64 @atomicrmw_or_i64_release(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_or_i64_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a3, $zero, 3
+; LA32-NEXT:    bl %plt(__atomic_fetch_or_8)
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_or_i64_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amor_db.d $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw or ptr %a, i64 %b release
+  ret i64 %1
+}
+
+define i8 @atomicrmw_xor_i8_release(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_xor_i8_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB60_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a0, 0
+; LA32-NEXT:    xor $a4, $a3, $a1
+; LA32-NEXT:    sc.w $a4, $a0, 0
+; LA32-NEXT:    beqz $a4, .LBB60_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a3, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xor_i8_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    amxor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw xor ptr %a, i8 %b release
+  ret i8 %1
+}
+
+define i16 @atomicrmw_xor_i16_release(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_xor_i16_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB61_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a0, 0
+; LA32-NEXT:    xor $a4, $a3, $a1
+; LA32-NEXT:    sc.w $a4, $a0, 0
+; LA32-NEXT:    beqz $a4, .LBB61_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a3, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xor_i16_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    amxor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw xor ptr %a, i16 %b release
+  ret i16 %1
+}
+
+define i32 @atomicrmw_xor_i32_release(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_xor_i32_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB62_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    xor $a3, $a2, $a1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beqz $a3, .LBB62_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xor_i32_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amxor_db.w $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw xor ptr %a, i32 %b release
+  ret i32 %1
+}
+
+define i64 @atomicrmw_xor_i64_release(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_xor_i64_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a3, $zero, 3
+; LA32-NEXT:    bl %plt(__atomic_fetch_xor_8)
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xor_i64_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amxor_db.d $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw xor ptr %a, i64 %b release
+  ret i64 %1
+}
+
+define i8 @atomicrmw_xchg_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_xchg_i8_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    ori $a3, $zero, 255
+; LA32-NEXT:    sll.w $a3, $a3, $a2
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB64_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a0, 0
+; LA32-NEXT:    addi.w $a5, $a1, 0
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    and $a5, $a5, $a3
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    sc.w $a5, $a0, 0
+; LA32-NEXT:    beqz $a5, .LBB64_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a4, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xchg_i8_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    addi.w $a3, $a3, 0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB64_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    addi.w $a5, $a1, 0
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB64_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_0_i8_acq_rel(ptr %a) nounwind {
+; LA32-LABEL: atomicrmw_xchg_0_i8_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a1, $a0, 3
+; LA32-NEXT:    ori $a2, $zero, 255
+; LA32-NEXT:    sll.w $a2, $a2, $a1
+; LA32-NEXT:    nor $a2, $a2, $zero
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB65_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a0, 0
+; LA32-NEXT:    and $a4, $a3, $a2
+; LA32-NEXT:    sc.w $a4, $a0, 0
+; LA32-NEXT:    beqz $a4, .LBB65_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a3, $a1
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xchg_0_i8_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a0, 3
+; LA64-NEXT:    ori $a2, $zero, 255
+; LA64-NEXT:    sll.w $a2, $a2, $a1
+; LA64-NEXT:    nor $a2, $a2, $zero
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    amand_db.w $a3, $a2, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a1
+; LA64-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i8 0 acq_rel
+  ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_minus_1_i8_acq_rel(ptr %a) nounwind {
+; LA32-LABEL: atomicrmw_xchg_minus_1_i8_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a1, $a0, 3
+; LA32-NEXT:    ori $a2, $zero, 255
+; LA32-NEXT:    sll.w $a2, $a2, $a1
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB66_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a0, 0
+; LA32-NEXT:    or $a4, $a3, $a2
+; LA32-NEXT:    sc.w $a4, $a0, 0
+; LA32-NEXT:    beqz $a4, .LBB66_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a3, $a1
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xchg_minus_1_i8_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a0, 3
+; LA64-NEXT:    ori $a2, $zero, 255
+; LA64-NEXT:    sll.w $a2, $a2, $a1
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    amor_db.w $a3, $a2, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a1
+; LA64-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i8 -1 acq_rel
+  ret i8 %1
+}
+
+define i16 @atomicrmw_xchg_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_xchg_i16_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    lu12i.w $a2, 15
+; LA32-NEXT:    ori $a2, $a2, 4095
+; LA32-NEXT:    slli.w $a3, $a0, 3
+; LA32-NEXT:    sll.w $a2, $a2, $a3
+; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
+; LA32-NEXT:    sll.w $a1, $a1, $a3
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB67_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a0, 0
+; LA32-NEXT:    addi.w $a5, $a1, 0
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    and $a5, $a5, $a2
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    sc.w $a5, $a0, 0
+; LA32-NEXT:    beqz $a5, .LBB67_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a4, $a3
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xchg_i16_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    slli.d $a3, $a0, 3
+; LA64-NEXT:    sll.w $a2, $a2, $a3
+; LA64-NEXT:    addi.w $a2, $a2, 0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a3
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB67_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    addi.w $a5, $a1, 0
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a2
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB67_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a3
+; LA64-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i16 %b acq_rel
+  ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_0_i16_acq_rel(ptr %a) nounwind {
+; LA32-LABEL: atomicrmw_xchg_0_i16_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    lu12i.w $a1, 15
+; LA32-NEXT:    ori $a1, $a1, 4095
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    nor $a1, $a1, $zero
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB68_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a0, 0
+; LA32-NEXT:    and $a4, $a3, $a1
+; LA32-NEXT:    sc.w $a4, $a0, 0
+; LA32-NEXT:    beqz $a4, .LBB68_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a3, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xchg_0_i16_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a1, 15
+; LA64-NEXT:    ori $a1, $a1, 4095
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    nor $a1, $a1, $zero
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    amand_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i16 0 acq_rel
+  ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_minus_1_i16_acq_rel(ptr %a) nounwind {
+; LA32-LABEL: atomicrmw_xchg_minus_1_i16_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    lu12i.w $a1, 15
+; LA32-NEXT:    ori $a1, $a1, 4095
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB69_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a0, 0
+; LA32-NEXT:    or $a4, $a3, $a1
+; LA32-NEXT:    sc.w $a4, $a0, 0
+; LA32-NEXT:    beqz $a4, .LBB69_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a3, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xchg_minus_1_i16_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a1, 15
+; LA64-NEXT:    ori $a1, $a1, 4095
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i16 -1 acq_rel
+  ret i16 %1
+}
+
+define i32 @atomicrmw_xchg_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_xchg_i32_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB70_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    move $a3, $a1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beqz $a3, .LBB70_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xchg_i32_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amswap_db.w $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i32 %b acq_rel
+  ret i32 %1
+}
+
+define i64 @atomicrmw_xchg_i64_acq_rel(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_xchg_i64_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a3, $zero, 4
+; LA32-NEXT:    bl %plt(__atomic_exchange_8)
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xchg_i64_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amswap_db.d $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i64 %b acq_rel
+  ret i64 %1
+}
+
+define i8 @atomicrmw_add_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_add_i8_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    ori $a3, $zero, 255
+; LA32-NEXT:    sll.w $a3, $a3, $a2
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB72_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a0, 0
+; LA32-NEXT:    add.w $a5, $a4, $a1
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    and $a5, $a5, $a3
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    sc.w $a5, $a0, 0
+; LA32-NEXT:    beqz $a5, .LBB72_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a4, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_add_i8_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    addi.w $a3, $a3, 0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB72_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    add.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB72_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw add ptr %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+define i16 @atomicrmw_add_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_add_i16_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    lu12i.w $a2, 15
+; LA32-NEXT:    ori $a2, $a2, 4095
+; LA32-NEXT:    slli.w $a3, $a0, 3
+; LA32-NEXT:    sll.w $a2, $a2, $a3
+; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
+; LA32-NEXT:    sll.w $a1, $a1, $a3
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB73_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a0, 0
+; LA32-NEXT:    add.w $a5, $a4, $a1
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    and $a5, $a5, $a2
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    sc.w $a5, $a0, 0
+; LA32-NEXT:    beqz $a5, .LBB73_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a4, $a3
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_add_i16_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    slli.d $a3, $a0, 3
+; LA64-NEXT:    sll.w $a2, $a2, $a3
+; LA64-NEXT:    addi.w $a2, $a2, 0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a3
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB73_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    add.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a2
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB73_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a3
+; LA64-NEXT:    ret
+  %1 = atomicrmw add ptr %a, i16 %b acq_rel
+  ret i16 %1
+}
+
+define i32 @atomicrmw_add_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_add_i32_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB74_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    add.w $a3, $a2, $a1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beqz $a3, .LBB74_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_add_i32_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amadd_db.w $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw add ptr %a, i32 %b acq_rel
+  ret i32 %1
+}
+
+define i64 @atomicrmw_add_i64_acq_rel(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_add_i64_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a3, $zero, 4
+; LA32-NEXT:    bl %plt(__atomic_fetch_add_8)
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_add_i64_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amadd_db.d $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw add ptr %a, i64 %b acq_rel
+  ret i64 %1
+}
+
+define i8 @atomicrmw_sub_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_sub_i8_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    ori $a3, $zero, 255
+; LA32-NEXT:    sll.w $a3, $a3, $a2
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB76_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a0, 0
+; LA32-NEXT:    sub.w $a5, $a4, $a1
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    and $a5, $a5, $a3
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    sc.w $a5, $a0, 0
+; LA32-NEXT:    beqz $a5, .LBB76_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a4, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_sub_i8_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    addi.w $a3, $a3, 0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB76_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    sub.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB76_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw sub ptr %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+define i16 @atomicrmw_sub_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_sub_i16_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    lu12i.w $a2, 15
+; LA32-NEXT:    ori $a2, $a2, 4095
+; LA32-NEXT:    slli.w $a3, $a0, 3
+; LA32-NEXT:    sll.w $a2, $a2, $a3
+; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
+; LA32-NEXT:    sll.w $a1, $a1, $a3
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB77_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a0, 0
+; LA32-NEXT:    sub.w $a5, $a4, $a1
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    and $a5, $a5, $a2
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    sc.w $a5, $a0, 0
+; LA32-NEXT:    beqz $a5, .LBB77_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a4, $a3
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_sub_i16_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    slli.d $a3, $a0, 3
+; LA64-NEXT:    sll.w $a2, $a2, $a3
+; LA64-NEXT:    addi.w $a2, $a2, 0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a3
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB77_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    sub.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a2
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB77_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a3
+; LA64-NEXT:    ret
+  %1 = atomicrmw sub ptr %a, i16 %b acq_rel
+  ret i16 %1
+}
+
+define i32 @atomicrmw_sub_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_sub_i32_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB78_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    sub.w $a3, $a2, $a1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beqz $a3, .LBB78_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_sub_i32_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sub.w $a2, $zero, $a1
+; LA64-NEXT:    amadd_db.w $a1, $a2, $a0
+; LA64-NEXT:    move $a0, $a1
+; LA64-NEXT:    ret
+  %1 = atomicrmw sub ptr %a, i32 %b acq_rel
+  ret i32 %1
+}
+
+define i64 @atomicrmw_sub_i64_acq_rel(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_sub_i64_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a3, $zero, 4
+; LA32-NEXT:    bl %plt(__atomic_fetch_sub_8)
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_sub_i64_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sub.d $a2, $zero, $a1
+; LA64-NEXT:    amadd_db.d $a1, $a2, $a0
+; LA64-NEXT:    move $a0, $a1
+; LA64-NEXT:    ret
+  %1 = atomicrmw sub ptr %a, i64 %b acq_rel
+  ret i64 %1
+}
+
+define i8 @atomicrmw_nand_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_nand_i8_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    ori $a3, $zero, 255
+; LA32-NEXT:    sll.w $a3, $a3, $a2
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB80_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a0, 0
+; LA32-NEXT:    and $a5, $a4, $a1
+; LA32-NEXT:    nor $a5, $a5, $zero
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    and $a5, $a5, $a3
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    sc.w $a5, $a0, 0
+; LA32-NEXT:    beqz $a5, .LBB80_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a4, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_nand_i8_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    addi.w $a3, $a3, 0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB80_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a5, $a4, $a1
+; LA64-NEXT:    nor $a5, $a5, $zero
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB80_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw nand ptr %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+define i16 @atomicrmw_nand_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_nand_i16_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    lu12i.w $a2, 15
+; LA32-NEXT:    ori $a2, $a2, 4095
+; LA32-NEXT:    slli.w $a3, $a0, 3
+; LA32-NEXT:    sll.w $a2, $a2, $a3
+; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
+; LA32-NEXT:    sll.w $a1, $a1, $a3
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB81_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a0, 0
+; LA32-NEXT:    and $a5, $a4, $a1
+; LA32-NEXT:    nor $a5, $a5, $zero
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    and $a5, $a5, $a2
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    sc.w $a5, $a0, 0
+; LA32-NEXT:    beqz $a5, .LBB81_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a4, $a3
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_nand_i16_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    slli.d $a3, $a0, 3
+; LA64-NEXT:    sll.w $a2, $a2, $a3
+; LA64-NEXT:    addi.w $a2, $a2, 0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a3
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB81_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a5, $a4, $a1
+; LA64-NEXT:    nor $a5, $a5, $zero
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a2
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB81_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a3
+; LA64-NEXT:    ret
+  %1 = atomicrmw nand ptr %a, i16 %b acq_rel
+  ret i16 %1
+}
+
+define i32 @atomicrmw_nand_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_nand_i32_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB82_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    and $a3, $a2, $a1
+; LA32-NEXT:    nor $a3, $a3, $zero
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beqz $a3, .LBB82_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_nand_i32_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:  .LBB82_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a2, $a0, 0
+; LA64-NEXT:    and $a3, $a2, $a1
+; LA64-NEXT:    nor $a3, $a3, $zero
+; LA64-NEXT:    sc.w $a3, $a0, 0
+; LA64-NEXT:    beqz $a3, .LBB82_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw nand ptr %a, i32 %b acq_rel
+  ret i32 %1
+}
+
+define i64 @atomicrmw_nand_i64_acq_rel(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_nand_i64_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a3, $zero, 4
+; LA32-NEXT:    bl %plt(__atomic_fetch_nand_8)
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_nand_i64_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:  .LBB83_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.d $a2, $a0, 0
+; LA64-NEXT:    and $a3, $a2, $a1
+; LA64-NEXT:    nor $a3, $a3, $zero
+; LA64-NEXT:    sc.d $a3, $a0, 0
+; LA64-NEXT:    beqz $a3, .LBB83_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw nand ptr %a, i64 %b acq_rel
+  ret i64 %1
+}
+
+define i8 @atomicrmw_and_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_and_i8_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    ori $a3, $zero, 255
+; LA32-NEXT:    sll.w $a3, $a3, $a2
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    orn $a1, $a1, $a3
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB84_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a0, 0
+; LA32-NEXT:    and $a4, $a3, $a1
+; LA32-NEXT:    sc.w $a4, $a0, 0
+; LA32-NEXT:    beqz $a4, .LBB84_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a3, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_and_i8_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    orn $a1, $a1, $a3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    amand_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw and ptr %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+define i16 @atomicrmw_and_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_and_i16_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    lu12i.w $a2, 15
+; LA32-NEXT:    ori $a2, $a2, 4095
+; LA32-NEXT:    slli.w $a3, $a0, 3
+; LA32-NEXT:    sll.w $a2, $a2, $a3
+; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
+; LA32-NEXT:    sll.w $a1, $a1, $a3
+; LA32-NEXT:    orn $a1, $a1, $a2
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB85_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    and $a4, $a2, $a1
+; LA32-NEXT:    sc.w $a4, $a0, 0
+; LA32-NEXT:    beqz $a4, .LBB85_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a2, $a3
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_and_i16_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    slli.d $a3, $a0, 3
+; LA64-NEXT:    sll.w $a2, $a2, $a3
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a3
+; LA64-NEXT:    orn $a1, $a1, $a2
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    amand_db.w $a2, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a2, $a3
+; LA64-NEXT:    ret
+  %1 = atomicrmw and ptr %a, i16 %b acq_rel
+  ret i16 %1
+}
+
+define i32 @atomicrmw_and_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_and_i32_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB86_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    and $a3, $a2, $a1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beqz $a3, .LBB86_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_and_i32_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amand_db.w $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw and ptr %a, i32 %b acq_rel
+  ret i32 %1
+}
+
+define i64 @atomicrmw_and_i64_acq_rel(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_and_i64_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a3, $zero, 4
+; LA32-NEXT:    bl %plt(__atomic_fetch_and_8)
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_and_i64_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amand_db.d $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw and ptr %a, i64 %b acq_rel
+  ret i64 %1
+}
+
+define i8 @atomicrmw_or_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_or_i8_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB88_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a0, 0
+; LA32-NEXT:    or $a4, $a3, $a1
+; LA32-NEXT:    sc.w $a4, $a0, 0
+; LA32-NEXT:    beqz $a4, .LBB88_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a3, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_or_i8_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw or ptr %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+define i16 @atomicrmw_or_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_or_i16_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB89_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a0, 0
+; LA32-NEXT:    or $a4, $a3, $a1
+; LA32-NEXT:    sc.w $a4, $a0, 0
+; LA32-NEXT:    beqz $a4, .LBB89_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a3, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_or_i16_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw or ptr %a, i16 %b acq_rel
+  ret i16 %1
+}
+
+define i32 @atomicrmw_or_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_or_i32_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB90_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    or $a3, $a2, $a1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beqz $a3, .LBB90_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_or_i32_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amor_db.w $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw or ptr %a, i32 %b acq_rel
+  ret i32 %1
+}
+
+define i64 @atomicrmw_or_i64_acq_rel(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_or_i64_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a3, $zero, 4
+; LA32-NEXT:    bl %plt(__atomic_fetch_or_8)
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_or_i64_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amor_db.d $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw or ptr %a, i64 %b acq_rel
+  ret i64 %1
+}
+
+define i8 @atomicrmw_xor_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_xor_i8_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB92_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a0, 0
+; LA32-NEXT:    xor $a4, $a3, $a1
+; LA32-NEXT:    sc.w $a4, $a0, 0
+; LA32-NEXT:    beqz $a4, .LBB92_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a3, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xor_i8_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    amxor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw xor ptr %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+define i16 @atomicrmw_xor_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_xor_i16_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB93_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a0, 0
+; LA32-NEXT:    xor $a4, $a3, $a1
+; LA32-NEXT:    sc.w $a4, $a0, 0
+; LA32-NEXT:    beqz $a4, .LBB93_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a3, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xor_i16_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    amxor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw xor ptr %a, i16 %b acq_rel
+  ret i16 %1
+}
+
+define i32 @atomicrmw_xor_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_xor_i32_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB94_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    xor $a3, $a2, $a1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beqz $a3, .LBB94_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xor_i32_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amxor_db.w $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw xor ptr %a, i32 %b acq_rel
+  ret i32 %1
+}
+
+define i64 @atomicrmw_xor_i64_acq_rel(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_xor_i64_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a3, $zero, 4
+; LA32-NEXT:    bl %plt(__atomic_fetch_xor_8)
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xor_i64_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amxor_db.d $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw xor ptr %a, i64 %b acq_rel
+  ret i64 %1
+}
+
+define i8 @atomicrmw_xchg_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_xchg_i8_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    ori $a3, $zero, 255
+; LA32-NEXT:    sll.w $a3, $a3, $a2
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB96_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a0, 0
+; LA32-NEXT:    addi.w $a5, $a1, 0
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    and $a5, $a5, $a3
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    sc.w $a5, $a0, 0
+; LA32-NEXT:    beqz $a5, .LBB96_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a4, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xchg_i8_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    addi.w $a3, $a3, 0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB96_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    addi.w $a5, $a1, 0
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB96_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_0_i8_seq_cst(ptr %a) nounwind {
+; LA32-LABEL: atomicrmw_xchg_0_i8_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a1, $a0, 3
+; LA32-NEXT:    ori $a2, $zero, 255
+; LA32-NEXT:    sll.w $a2, $a2, $a1
+; LA32-NEXT:    nor $a2, $a2, $zero
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB97_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a0, 0
+; LA32-NEXT:    and $a4, $a3, $a2
+; LA32-NEXT:    sc.w $a4, $a0, 0
+; LA32-NEXT:    beqz $a4, .LBB97_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a3, $a1
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xchg_0_i8_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a0, 3
+; LA64-NEXT:    ori $a2, $zero, 255
+; LA64-NEXT:    sll.w $a2, $a2, $a1
+; LA64-NEXT:    nor $a2, $a2, $zero
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    amand_db.w $a3, $a2, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a1
+; LA64-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i8 0 seq_cst
+  ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_minus_1_i8_seq_cst(ptr %a) nounwind {
+; LA32-LABEL: atomicrmw_xchg_minus_1_i8_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a1, $a0, 3
+; LA32-NEXT:    ori $a2, $zero, 255
+; LA32-NEXT:    sll.w $a2, $a2, $a1
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB98_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a0, 0
+; LA32-NEXT:    or $a4, $a3, $a2
+; LA32-NEXT:    sc.w $a4, $a0, 0
+; LA32-NEXT:    beqz $a4, .LBB98_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a3, $a1
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xchg_minus_1_i8_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a0, 3
+; LA64-NEXT:    ori $a2, $zero, 255
+; LA64-NEXT:    sll.w $a2, $a2, $a1
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    amor_db.w $a3, $a2, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a1
+; LA64-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i8 -1 seq_cst
+  ret i8 %1
+}
+
+define i16 @atomicrmw_xchg_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_xchg_i16_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    lu12i.w $a2, 15
+; LA32-NEXT:    ori $a2, $a2, 4095
+; LA32-NEXT:    slli.w $a3, $a0, 3
+; LA32-NEXT:    sll.w $a2, $a2, $a3
+; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
+; LA32-NEXT:    sll.w $a1, $a1, $a3
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB99_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a0, 0
+; LA32-NEXT:    addi.w $a5, $a1, 0
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    and $a5, $a5, $a2
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    sc.w $a5, $a0, 0
+; LA32-NEXT:    beqz $a5, .LBB99_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a4, $a3
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xchg_i16_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    slli.d $a3, $a0, 3
+; LA64-NEXT:    sll.w $a2, $a2, $a3
+; LA64-NEXT:    addi.w $a2, $a2, 0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a3
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB99_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    addi.w $a5, $a1, 0
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a2
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB99_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a3
+; LA64-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i16 %b seq_cst
+  ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_0_i16_seq_cst(ptr %a) nounwind {
+; LA32-LABEL: atomicrmw_xchg_0_i16_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    lu12i.w $a1, 15
+; LA32-NEXT:    ori $a1, $a1, 4095
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    nor $a1, $a1, $zero
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB100_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a0, 0
+; LA32-NEXT:    and $a4, $a3, $a1
+; LA32-NEXT:    sc.w $a4, $a0, 0
+; LA32-NEXT:    beqz $a4, .LBB100_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a3, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xchg_0_i16_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a1, 15
+; LA64-NEXT:    ori $a1, $a1, 4095
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    nor $a1, $a1, $zero
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    amand_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i16 0 seq_cst
+  ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_minus_1_i16_seq_cst(ptr %a) nounwind {
+; LA32-LABEL: atomicrmw_xchg_minus_1_i16_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    lu12i.w $a1, 15
+; LA32-NEXT:    ori $a1, $a1, 4095
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB101_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a0, 0
+; LA32-NEXT:    or $a4, $a3, $a1
+; LA32-NEXT:    sc.w $a4, $a0, 0
+; LA32-NEXT:    beqz $a4, .LBB101_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a3, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xchg_minus_1_i16_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a1, 15
+; LA64-NEXT:    ori $a1, $a1, 4095
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i16 -1 seq_cst
+  ret i16 %1
+}
+
+define i32 @atomicrmw_xchg_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_xchg_i32_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB102_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    move $a3, $a1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beqz $a3, .LBB102_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xchg_i32_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amswap_db.w $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i32 %b seq_cst
+  ret i32 %1
+}
+
+define i64 @atomicrmw_xchg_i64_seq_cst(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_xchg_i64_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a3, $zero, 5
+; LA32-NEXT:    bl %plt(__atomic_exchange_8)
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xchg_i64_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amswap_db.d $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i64 %b seq_cst
+  ret i64 %1
+}
+
+define i8 @atomicrmw_add_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_add_i8_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    ori $a3, $zero, 255
+; LA32-NEXT:    sll.w $a3, $a3, $a2
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB104_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a0, 0
+; LA32-NEXT:    add.w $a5, $a4, $a1
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    and $a5, $a5, $a3
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    sc.w $a5, $a0, 0
+; LA32-NEXT:    beqz $a5, .LBB104_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a4, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_add_i8_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    addi.w $a3, $a3, 0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB104_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    add.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB104_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw add ptr %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i16 @atomicrmw_add_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_add_i16_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    lu12i.w $a2, 15
+; LA32-NEXT:    ori $a2, $a2, 4095
+; LA32-NEXT:    slli.w $a3, $a0, 3
+; LA32-NEXT:    sll.w $a2, $a2, $a3
+; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
+; LA32-NEXT:    sll.w $a1, $a1, $a3
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB105_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a0, 0
+; LA32-NEXT:    add.w $a5, $a4, $a1
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    and $a5, $a5, $a2
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    sc.w $a5, $a0, 0
+; LA32-NEXT:    beqz $a5, .LBB105_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a4, $a3
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_add_i16_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    slli.d $a3, $a0, 3
+; LA64-NEXT:    sll.w $a2, $a2, $a3
+; LA64-NEXT:    addi.w $a2, $a2, 0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a3
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB105_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    add.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a2
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB105_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a3
+; LA64-NEXT:    ret
+  %1 = atomicrmw add ptr %a, i16 %b seq_cst
+  ret i16 %1
+}
+
+define i32 @atomicrmw_add_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_add_i32_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB106_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    add.w $a3, $a2, $a1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beqz $a3, .LBB106_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_add_i32_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amadd_db.w $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw add ptr %a, i32 %b seq_cst
+  ret i32 %1
+}
+
+define i64 @atomicrmw_add_i64_seq_cst(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_add_i64_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a3, $zero, 5
+; LA32-NEXT:    bl %plt(__atomic_fetch_add_8)
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_add_i64_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amadd_db.d $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw add ptr %a, i64 %b seq_cst
+  ret i64 %1
+}
+
+define i8 @atomicrmw_sub_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_sub_i8_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    ori $a3, $zero, 255
+; LA32-NEXT:    sll.w $a3, $a3, $a2
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB108_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a0, 0
+; LA32-NEXT:    sub.w $a5, $a4, $a1
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    and $a5, $a5, $a3
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    sc.w $a5, $a0, 0
+; LA32-NEXT:    beqz $a5, .LBB108_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a4, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_sub_i8_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    addi.w $a3, $a3, 0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB108_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    sub.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB108_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw sub ptr %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i16 @atomicrmw_sub_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_sub_i16_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    lu12i.w $a2, 15
+; LA32-NEXT:    ori $a2, $a2, 4095
+; LA32-NEXT:    slli.w $a3, $a0, 3
+; LA32-NEXT:    sll.w $a2, $a2, $a3
+; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
+; LA32-NEXT:    sll.w $a1, $a1, $a3
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB109_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a0, 0
+; LA32-NEXT:    sub.w $a5, $a4, $a1
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    and $a5, $a5, $a2
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    sc.w $a5, $a0, 0
+; LA32-NEXT:    beqz $a5, .LBB109_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a4, $a3
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_sub_i16_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    slli.d $a3, $a0, 3
+; LA64-NEXT:    sll.w $a2, $a2, $a3
+; LA64-NEXT:    addi.w $a2, $a2, 0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a3
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB109_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    sub.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a2
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB109_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a3
+; LA64-NEXT:    ret
+  %1 = atomicrmw sub ptr %a, i16 %b seq_cst
+  ret i16 %1
+}
+
+define i32 @atomicrmw_sub_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_sub_i32_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB110_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    sub.w $a3, $a2, $a1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beqz $a3, .LBB110_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_sub_i32_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sub.w $a2, $zero, $a1
+; LA64-NEXT:    amadd_db.w $a1, $a2, $a0
+; LA64-NEXT:    move $a0, $a1
+; LA64-NEXT:    ret
+  %1 = atomicrmw sub ptr %a, i32 %b seq_cst
+  ret i32 %1
+}
+
+define i64 @atomicrmw_sub_i64_seq_cst(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_sub_i64_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a3, $zero, 5
+; LA32-NEXT:    bl %plt(__atomic_fetch_sub_8)
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_sub_i64_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sub.d $a2, $zero, $a1
+; LA64-NEXT:    amadd_db.d $a1, $a2, $a0
+; LA64-NEXT:    move $a0, $a1
+; LA64-NEXT:    ret
+  %1 = atomicrmw sub ptr %a, i64 %b seq_cst
+  ret i64 %1
+}
+
+define i8 @atomicrmw_nand_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_nand_i8_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    ori $a3, $zero, 255
+; LA32-NEXT:    sll.w $a3, $a3, $a2
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB112_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a0, 0
+; LA32-NEXT:    and $a5, $a4, $a1
+; LA32-NEXT:    nor $a5, $a5, $zero
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    and $a5, $a5, $a3
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    sc.w $a5, $a0, 0
+; LA32-NEXT:    beqz $a5, .LBB112_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a4, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_nand_i8_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    addi.w $a3, $a3, 0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB112_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a5, $a4, $a1
+; LA64-NEXT:    nor $a5, $a5, $zero
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB112_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw nand ptr %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i16 @atomicrmw_nand_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_nand_i16_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    lu12i.w $a2, 15
+; LA32-NEXT:    ori $a2, $a2, 4095
+; LA32-NEXT:    slli.w $a3, $a0, 3
+; LA32-NEXT:    sll.w $a2, $a2, $a3
+; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
+; LA32-NEXT:    sll.w $a1, $a1, $a3
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB113_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a0, 0
+; LA32-NEXT:    and $a5, $a4, $a1
+; LA32-NEXT:    nor $a5, $a5, $zero
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    and $a5, $a5, $a2
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:    sc.w $a5, $a0, 0
+; LA32-NEXT:    beqz $a5, .LBB113_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a4, $a3
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_nand_i16_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    slli.d $a3, $a0, 3
+; LA64-NEXT:    sll.w $a2, $a2, $a3
+; LA64-NEXT:    addi.w $a2, $a2, 0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a3
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB113_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a5, $a4, $a1
+; LA64-NEXT:    nor $a5, $a5, $zero
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a2
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB113_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a3
+; LA64-NEXT:    ret
+  %1 = atomicrmw nand ptr %a, i16 %b seq_cst
+  ret i16 %1
+}
+
+define i32 @atomicrmw_nand_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_nand_i32_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB114_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    and $a3, $a2, $a1
+; LA32-NEXT:    nor $a3, $a3, $zero
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beqz $a3, .LBB114_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_nand_i32_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:  .LBB114_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a2, $a0, 0
+; LA64-NEXT:    and $a3, $a2, $a1
+; LA64-NEXT:    nor $a3, $a3, $zero
+; LA64-NEXT:    sc.w $a3, $a0, 0
+; LA64-NEXT:    beqz $a3, .LBB114_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw nand ptr %a, i32 %b seq_cst
+  ret i32 %1
+}
+
+define i64 @atomicrmw_nand_i64_seq_cst(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_nand_i64_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a3, $zero, 5
+; LA32-NEXT:    bl %plt(__atomic_fetch_nand_8)
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_nand_i64_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:  .LBB115_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.d $a2, $a0, 0
+; LA64-NEXT:    and $a3, $a2, $a1
+; LA64-NEXT:    nor $a3, $a3, $zero
+; LA64-NEXT:    sc.d $a3, $a0, 0
+; LA64-NEXT:    beqz $a3, .LBB115_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw nand ptr %a, i64 %b seq_cst
+  ret i64 %1
+}
+
+define i8 @atomicrmw_and_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_and_i8_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    ori $a3, $zero, 255
+; LA32-NEXT:    sll.w $a3, $a3, $a2
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    orn $a1, $a1, $a3
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB116_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a0, 0
+; LA32-NEXT:    and $a4, $a3, $a1
+; LA32-NEXT:    sc.w $a4, $a0, 0
+; LA32-NEXT:    beqz $a4, .LBB116_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a3, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_and_i8_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    orn $a1, $a1, $a3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    amand_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw and ptr %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i16 @atomicrmw_and_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_and_i16_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    lu12i.w $a2, 15
+; LA32-NEXT:    ori $a2, $a2, 4095
+; LA32-NEXT:    slli.w $a3, $a0, 3
+; LA32-NEXT:    sll.w $a2, $a2, $a3
+; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
+; LA32-NEXT:    sll.w $a1, $a1, $a3
+; LA32-NEXT:    orn $a1, $a1, $a2
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB117_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    and $a4, $a2, $a1
+; LA32-NEXT:    sc.w $a4, $a0, 0
+; LA32-NEXT:    beqz $a4, .LBB117_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a2, $a3
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_and_i16_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    slli.d $a3, $a0, 3
+; LA64-NEXT:    sll.w $a2, $a2, $a3
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a3
+; LA64-NEXT:    orn $a1, $a1, $a2
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    amand_db.w $a2, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a2, $a3
+; LA64-NEXT:    ret
+  %1 = atomicrmw and ptr %a, i16 %b seq_cst
+  ret i16 %1
+}
+
+define i32 @atomicrmw_and_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_and_i32_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB118_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    and $a3, $a2, $a1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beqz $a3, .LBB118_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_and_i32_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amand_db.w $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw and ptr %a, i32 %b seq_cst
+  ret i32 %1
+}
+
+define i64 @atomicrmw_and_i64_seq_cst(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_and_i64_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a3, $zero, 5
+; LA32-NEXT:    bl %plt(__atomic_fetch_and_8)
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_and_i64_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amand_db.d $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw and ptr %a, i64 %b seq_cst
+  ret i64 %1
+}
+
+define i8 @atomicrmw_or_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_or_i8_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB120_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a0, 0
+; LA32-NEXT:    or $a4, $a3, $a1
+; LA32-NEXT:    sc.w $a4, $a0, 0
+; LA32-NEXT:    beqz $a4, .LBB120_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a3, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_or_i8_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw or ptr %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i16 @atomicrmw_or_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_or_i16_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB121_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a0, 0
+; LA32-NEXT:    or $a4, $a3, $a1
+; LA32-NEXT:    sc.w $a4, $a0, 0
+; LA32-NEXT:    beqz $a4, .LBB121_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a3, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_or_i16_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw or ptr %a, i16 %b seq_cst
+  ret i16 %1
+}
+
+define i32 @atomicrmw_or_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_or_i32_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB122_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    or $a3, $a2, $a1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beqz $a3, .LBB122_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_or_i32_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amor_db.w $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw or ptr %a, i32 %b seq_cst
+  ret i32 %1
+}
+
+define i64 @atomicrmw_or_i64_seq_cst(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_or_i64_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a3, $zero, 5
+; LA32-NEXT:    bl %plt(__atomic_fetch_or_8)
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_or_i64_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amor_db.d $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw or ptr %a, i64 %b seq_cst
+  ret i64 %1
+}
+
+define i8 @atomicrmw_xor_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_xor_i8_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB124_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a0, 0
+; LA32-NEXT:    xor $a4, $a3, $a1
+; LA32-NEXT:    sc.w $a4, $a0, 0
+; LA32-NEXT:    beqz $a4, .LBB124_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a3, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xor_i8_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    amxor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw xor ptr %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i16 @atomicrmw_xor_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_xor_i16_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
+; LA32-NEXT:    slli.w $a2, $a0, 3
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
+; LA32-NEXT:  .LBB125_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a0, 0
+; LA32-NEXT:    xor $a4, $a3, $a1
+; LA32-NEXT:    sc.w $a4, $a0, 0
+; LA32-NEXT:    beqz $a4, .LBB125_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    srl.w $a0, $a3, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xor_i16_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    amxor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw xor ptr %a, i16 %b seq_cst
+  ret i16 %1
+}
+
+define i32 @atomicrmw_xor_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_xor_i32_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB126_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    xor $a3, $a2, $a1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beqz $a3, .LBB126_1
+; LA32-NEXT:  # %bb.2:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xor_i32_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amxor_db.w $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw xor ptr %a, i32 %b seq_cst
+  ret i32 %1
+}
+
+define i64 @atomicrmw_xor_i64_seq_cst(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_xor_i64_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a3, $zero, 5
+; LA32-NEXT:    bl %plt(__atomic_fetch_xor_8)
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: atomicrmw_xor_i64_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amxor_db.d $a2, $a1, $a0
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+  %1 = atomicrmw xor ptr %a, i64 %b seq_cst
+  ret i64 %1
+}
+
 define i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; LA32-LABEL: atomicrmw_xchg_i8_monotonic:
 ; LA32:       # %bb.0:
@@ -1005,14 +3987,14 @@ define i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; LA32-NEXT:    andi $a1, $a1, 255
 ; LA32-NEXT:    sll.w $a1, $a1, $a2
 ; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
-; LA32-NEXT:  .LBB32_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:  .LBB128_1: # =>This Inner Loop Header: Depth=1
 ; LA32-NEXT:    ll.w $a4, $a0, 0
 ; LA32-NEXT:    addi.w $a5, $a1, 0
 ; LA32-NEXT:    xor $a5, $a4, $a5
 ; LA32-NEXT:    and $a5, $a5, $a3
 ; LA32-NEXT:    xor $a5, $a4, $a5
 ; LA32-NEXT:    sc.w $a5, $a0, 0
-; LA32-NEXT:    beqz $a5, .LBB32_1
+; LA32-NEXT:    beqz $a5, .LBB128_1
 ; LA32-NEXT:  # %bb.2:
 ; LA32-NEXT:    srl.w $a0, $a4, $a2
 ; LA32-NEXT:    ret
@@ -1027,14 +4009,14 @@ define i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
 ; LA64-NEXT:    addi.w $a1, $a1, 0
 ; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
-; LA64-NEXT:  .LBB32_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:  .LBB128_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    addi.w $a5, $a1, 0
 ; LA64-NEXT:    xor $a5, $a4, $a5
 ; LA64-NEXT:    and $a5, $a5, $a3
 ; LA64-NEXT:    xor $a5, $a4, $a5
 ; LA64-NEXT:    sc.w $a5, $a0, 0
-; LA64-NEXT:    beqz $a5, .LBB32_1
+; LA64-NEXT:    beqz $a5, .LBB128_1
 ; LA64-NEXT:  # %bb.2:
 ; LA64-NEXT:    srl.w $a0, $a4, $a2
 ; LA64-NEXT:    ret
@@ -1050,11 +4032,11 @@ define i8 @atomicrmw_xchg_0_i8_monotonic(ptr %a) nounwind {
 ; LA32-NEXT:    sll.w $a2, $a2, $a1
 ; LA32-NEXT:    nor $a2, $a2, $zero
 ; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
-; LA32-NEXT:  .LBB33_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:  .LBB129_1: # =>This Inner Loop Header: Depth=1
 ; LA32-NEXT:    ll.w $a3, $a0, 0
 ; LA32-NEXT:    and $a4, $a3, $a2
 ; LA32-NEXT:    sc.w $a4, $a0, 0
-; LA32-NEXT:    beqz $a4, .LBB33_1
+; LA32-NEXT:    beqz $a4, .LBB129_1
 ; LA32-NEXT:  # %bb.2:
 ; LA32-NEXT:    srl.w $a0, $a3, $a1
 ; LA32-NEXT:    ret
@@ -1080,11 +4062,11 @@ define i8 @atomicrmw_xchg_minus_1_i8_monotonic(ptr %a) nounwind {
 ; LA32-NEXT:    ori $a2, $zero, 255
 ; LA32-NEXT:    sll.w $a2, $a2, $a1
 ; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
-; LA32-NEXT:  .LBB34_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:  .LBB130_1: # =>This Inner Loop Header: Depth=1
 ; LA32-NEXT:    ll.w $a3, $a0, 0
 ; LA32-NEXT:    or $a4, $a3, $a2
 ; LA32-NEXT:    sc.w $a4, $a0, 0
-; LA32-NEXT:    beqz $a4, .LBB34_1
+; LA32-NEXT:    beqz $a4, .LBB130_1
 ; LA32-NEXT:  # %bb.2:
 ; LA32-NEXT:    srl.w $a0, $a3, $a1
 ; LA32-NEXT:    ret
@@ -1112,14 +4094,14 @@ define i16 @atomicrmw_xchg_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
 ; LA32-NEXT:    sll.w $a1, $a1, $a3
 ; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
-; LA32-NEXT:  .LBB35_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:  .LBB131_1: # =>This Inner Loop Header: Depth=1
 ; LA32-NEXT:    ll.w $a4, $a0, 0
 ; LA32-NEXT:    addi.w $a5, $a1, 0
 ; LA32-NEXT:    xor $a5, $a4, $a5
 ; LA32-NEXT:    and $a5, $a5, $a2
 ; LA32-NEXT:    xor $a5, $a4, $a5
 ; LA32-NEXT:    sc.w $a5, $a0, 0
-; LA32-NEXT:    beqz $a5, .LBB35_1
+; LA32-NEXT:    beqz $a5, .LBB131_1
 ; LA32-NEXT:  # %bb.2:
 ; LA32-NEXT:    srl.w $a0, $a4, $a3
 ; LA32-NEXT:    ret
@@ -1135,14 +4117,14 @@ define i16 @atomicrmw_xchg_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a1, $a1, $a3
 ; LA64-NEXT:    addi.w $a1, $a1, 0
 ; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
-; LA64-NEXT:  .LBB35_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:  .LBB131_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    addi.w $a5, $a1, 0
 ; LA64-NEXT:    xor $a5, $a4, $a5
 ; LA64-NEXT:    and $a5, $a5, $a2
 ; LA64-NEXT:    xor $a5, $a4, $a5
 ; LA64-NEXT:    sc.w $a5, $a0, 0
-; LA64-NEXT:    beqz $a5, .LBB35_1
+; LA64-NEXT:    beqz $a5, .LBB131_1
 ; LA64-NEXT:  # %bb.2:
 ; LA64-NEXT:    srl.w $a0, $a4, $a3
 ; LA64-NEXT:    ret
@@ -1159,11 +4141,11 @@ define i16 @atomicrmw_xchg_0_i16_monotonic(ptr %a) nounwind {
 ; LA32-NEXT:    sll.w $a1, $a1, $a2
 ; LA32-NEXT:    nor $a1, $a1, $zero
 ; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
-; LA32-NEXT:  .LBB36_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:  .LBB132_1: # =>This Inner Loop Header: Depth=1
 ; LA32-NEXT:    ll.w $a3, $a0, 0
 ; LA32-NEXT:    and $a4, $a3, $a1
 ; LA32-NEXT:    sc.w $a4, $a0, 0
-; LA32-NEXT:    beqz $a4, .LBB36_1
+; LA32-NEXT:    beqz $a4, .LBB132_1
 ; LA32-NEXT:  # %bb.2:
 ; LA32-NEXT:    srl.w $a0, $a3, $a2
 ; LA32-NEXT:    ret
@@ -1191,11 +4173,11 @@ define i16 @atomicrmw_xchg_minus_1_i16_monotonic(ptr %a) nounwind {
 ; LA32-NEXT:    slli.w $a2, $a0, 3
 ; LA32-NEXT:    sll.w $a1, $a1, $a2
 ; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
-; LA32-NEXT:  .LBB37_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:  .LBB133_1: # =>This Inner Loop Header: Depth=1
 ; LA32-NEXT:    ll.w $a3, $a0, 0
 ; LA32-NEXT:    or $a4, $a3, $a1
 ; LA32-NEXT:    sc.w $a4, $a0, 0
-; LA32-NEXT:    beqz $a4, .LBB37_1
+; LA32-NEXT:    beqz $a4, .LBB133_1
 ; LA32-NEXT:  # %bb.2:
 ; LA32-NEXT:    srl.w $a0, $a3, $a2
 ; LA32-NEXT:    ret
@@ -1217,11 +4199,11 @@ define i16 @atomicrmw_xchg_minus_1_i16_monotonic(ptr %a) nounwind {
 define i32 @atomicrmw_xchg_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; LA32-LABEL: atomicrmw_xchg_i32_monotonic:
 ; LA32:       # %bb.0:
-; LA32-NEXT:  .LBB38_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:  .LBB134_1: # =>This Inner Loop Header: Depth=1
 ; LA32-NEXT:    ll.w $a2, $a0, 0
 ; LA32-NEXT:    move $a3, $a1
 ; LA32-NEXT:    sc.w $a3, $a0, 0
-; LA32-NEXT:    beqz $a3, .LBB38_1
+; LA32-NEXT:    beqz $a3, .LBB134_1
 ; LA32-NEXT:  # %bb.2:
 ; LA32-NEXT:    move $a0, $a2
 ; LA32-NEXT:    ret
@@ -1264,14 +4246,14 @@ define i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; LA32-NEXT:    andi $a1, $a1, 255
 ; LA32-NEXT:    sll.w $a1, $a1, $a2
 ; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
-; LA32-NEXT:  .LBB40_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:  .LBB136_1: # =>This Inner Loop Header: Depth=1
 ; LA32-NEXT:    ll.w $a4, $a0, 0
 ; LA32-NEXT:    add.w $a5, $a4, $a1
 ; LA32-NEXT:    xor $a5, $a4, $a5
 ; LA32-NEXT:    and $a5, $a5, $a3
 ; LA32-NEXT:    xor $a5, $a4, $a5
 ; LA32-NEXT:    sc.w $a5, $a0, 0
-; LA32-NEXT:    beqz $a5, .LBB40_1
+; LA32-NEXT:    beqz $a5, .LBB136_1
 ; LA32-NEXT:  # %bb.2:
 ; LA32-NEXT:    srl.w $a0, $a4, $a2
 ; LA32-NEXT:    ret
@@ -1286,14 +4268,14 @@ define i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
 ; LA64-NEXT:    addi.w $a1, $a1, 0
 ; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
-; LA64-NEXT:  .LBB40_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:  .LBB136_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    add.w $a5, $a4, $a1
 ; LA64-NEXT:    xor $a5, $a4, $a5
 ; LA64-NEXT:    and $a5, $a5, $a3
 ; LA64-NEXT:    xor $a5, $a4, $a5
 ; LA64-NEXT:    sc.w $a5, $a0, 0
-; LA64-NEXT:    beqz $a5, .LBB40_1
+; LA64-NEXT:    beqz $a5, .LBB136_1
 ; LA64-NEXT:  # %bb.2:
 ; LA64-NEXT:    srl.w $a0, $a4, $a2
 ; LA64-NEXT:    ret
@@ -1311,14 +4293,14 @@ define i16 @atomicrmw_add_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
 ; LA32-NEXT:    sll.w $a1, $a1, $a3
 ; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
-; LA32-NEXT:  .LBB41_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:  .LBB137_1: # =>This Inner Loop Header: Depth=1
 ; LA32-NEXT:    ll.w $a4, $a0, 0
 ; LA32-NEXT:    add.w $a5, $a4, $a1
 ; LA32-NEXT:    xor $a5, $a4, $a5
 ; LA32-NEXT:    and $a5, $a5, $a2
 ; LA32-NEXT:    xor $a5, $a4, $a5
 ; LA32-NEXT:    sc.w $a5, $a0, 0
-; LA32-NEXT:    beqz $a5, .LBB41_1
+; LA32-NEXT:    beqz $a5, .LBB137_1
 ; LA32-NEXT:  # %bb.2:
 ; LA32-NEXT:    srl.w $a0, $a4, $a3
 ; LA32-NEXT:    ret
@@ -1334,14 +4316,14 @@ define i16 @atomicrmw_add_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a1, $a1, $a3
 ; LA64-NEXT:    addi.w $a1, $a1, 0
 ; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
-; LA64-NEXT:  .LBB41_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:  .LBB137_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    add.w $a5, $a4, $a1
 ; LA64-NEXT:    xor $a5, $a4, $a5
 ; LA64-NEXT:    and $a5, $a5, $a2
 ; LA64-NEXT:    xor $a5, $a4, $a5
 ; LA64-NEXT:    sc.w $a5, $a0, 0
-; LA64-NEXT:    beqz $a5, .LBB41_1
+; LA64-NEXT:    beqz $a5, .LBB137_1
 ; LA64-NEXT:  # %bb.2:
 ; LA64-NEXT:    srl.w $a0, $a4, $a3
 ; LA64-NEXT:    ret
@@ -1352,11 +4334,11 @@ define i16 @atomicrmw_add_i16_monotonic(ptr %a, i16 %b) nounwind {
 define i32 @atomicrmw_add_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; LA32-LABEL: atomicrmw_add_i32_monotonic:
 ; LA32:       # %bb.0:
-; LA32-NEXT:  .LBB42_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:  .LBB138_1: # =>This Inner Loop Header: Depth=1
 ; LA32-NEXT:    ll.w $a2, $a0, 0
 ; LA32-NEXT:    add.w $a3, $a2, $a1
 ; LA32-NEXT:    sc.w $a3, $a0, 0
-; LA32-NEXT:    beqz $a3, .LBB42_1
+; LA32-NEXT:    beqz $a3, .LBB138_1
 ; LA32-NEXT:  # %bb.2:
 ; LA32-NEXT:    move $a0, $a2
 ; LA32-NEXT:    ret
@@ -1399,14 +4381,14 @@ define i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; LA32-NEXT:    andi $a1, $a1, 255
 ; LA32-NEXT:    sll.w $a1, $a1, $a2
 ; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
-; LA32-NEXT:  .LBB44_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:  .LBB140_1: # =>This Inner Loop Header: Depth=1
 ; LA32-NEXT:    ll.w $a4, $a0, 0
 ; LA32-NEXT:    sub.w $a5, $a4, $a1
 ; LA32-NEXT:    xor $a5, $a4, $a5
 ; LA32-NEXT:    and $a5, $a5, $a3
 ; LA32-NEXT:    xor $a5, $a4, $a5
 ; LA32-NEXT:    sc.w $a5, $a0, 0
-; LA32-NEXT:    beqz $a5, .LBB44_1
+; LA32-NEXT:    beqz $a5, .LBB140_1
 ; LA32-NEXT:  # %bb.2:
 ; LA32-NEXT:    srl.w $a0, $a4, $a2
 ; LA32-NEXT:    ret
@@ -1421,14 +4403,14 @@ define i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
 ; LA64-NEXT:    addi.w $a1, $a1, 0
 ; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
-; LA64-NEXT:  .LBB44_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:  .LBB140_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    sub.w $a5, $a4, $a1
 ; LA64-NEXT:    xor $a5, $a4, $a5
 ; LA64-NEXT:    and $a5, $a5, $a3
 ; LA64-NEXT:    xor $a5, $a4, $a5
 ; LA64-NEXT:    sc.w $a5, $a0, 0
-; LA64-NEXT:    beqz $a5, .LBB44_1
+; LA64-NEXT:    beqz $a5, .LBB140_1
 ; LA64-NEXT:  # %bb.2:
 ; LA64-NEXT:    srl.w $a0, $a4, $a2
 ; LA64-NEXT:    ret
@@ -1446,14 +4428,14 @@ define i16 @atomicrmw_sub_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
 ; LA32-NEXT:    sll.w $a1, $a1, $a3
 ; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
-; LA32-NEXT:  .LBB45_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:  .LBB141_1: # =>This Inner Loop Header: Depth=1
 ; LA32-NEXT:    ll.w $a4, $a0, 0
 ; LA32-NEXT:    sub.w $a5, $a4, $a1
 ; LA32-NEXT:    xor $a5, $a4, $a5
 ; LA32-NEXT:    and $a5, $a5, $a2
 ; LA32-NEXT:    xor $a5, $a4, $a5
 ; LA32-NEXT:    sc.w $a5, $a0, 0
-; LA32-NEXT:    beqz $a5, .LBB45_1
+; LA32-NEXT:    beqz $a5, .LBB141_1
 ; LA32-NEXT:  # %bb.2:
 ; LA32-NEXT:    srl.w $a0, $a4, $a3
 ; LA32-NEXT:    ret
@@ -1469,14 +4451,14 @@ define i16 @atomicrmw_sub_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a1, $a1, $a3
 ; LA64-NEXT:    addi.w $a1, $a1, 0
 ; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
-; LA64-NEXT:  .LBB45_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:  .LBB141_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    sub.w $a5, $a4, $a1
 ; LA64-NEXT:    xor $a5, $a4, $a5
 ; LA64-NEXT:    and $a5, $a5, $a2
 ; LA64-NEXT:    xor $a5, $a4, $a5
 ; LA64-NEXT:    sc.w $a5, $a0, 0
-; LA64-NEXT:    beqz $a5, .LBB45_1
+; LA64-NEXT:    beqz $a5, .LBB141_1
 ; LA64-NEXT:  # %bb.2:
 ; LA64-NEXT:    srl.w $a0, $a4, $a3
 ; LA64-NEXT:    ret
@@ -1487,11 +4469,11 @@ define i16 @atomicrmw_sub_i16_monotonic(ptr %a, i16 %b) nounwind {
 define i32 @atomicrmw_sub_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; LA32-LABEL: atomicrmw_sub_i32_monotonic:
 ; LA32:       # %bb.0:
-; LA32-NEXT:  .LBB46_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:  .LBB142_1: # =>This Inner Loop Header: Depth=1
 ; LA32-NEXT:    ll.w $a2, $a0, 0
 ; LA32-NEXT:    sub.w $a3, $a2, $a1
 ; LA32-NEXT:    sc.w $a3, $a0, 0
-; LA32-NEXT:    beqz $a3, .LBB46_1
+; LA32-NEXT:    beqz $a3, .LBB142_1
 ; LA32-NEXT:  # %bb.2:
 ; LA32-NEXT:    move $a0, $a2
 ; LA32-NEXT:    ret
@@ -1536,7 +4518,7 @@ define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; LA32-NEXT:    andi $a1, $a1, 255
 ; LA32-NEXT:    sll.w $a1, $a1, $a2
 ; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
-; LA32-NEXT:  .LBB48_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:  .LBB144_1: # =>This Inner Loop Header: Depth=1
 ; LA32-NEXT:    ll.w $a4, $a0, 0
 ; LA32-NEXT:    and $a5, $a4, $a1
 ; LA32-NEXT:    nor $a5, $a5, $zero
@@ -1544,7 +4526,7 @@ define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; LA32-NEXT:    and $a5, $a5, $a3
 ; LA32-NEXT:    xor $a5, $a4, $a5
 ; LA32-NEXT:    sc.w $a5, $a0, 0
-; LA32-NEXT:    beqz $a5, .LBB48_1
+; LA32-NEXT:    beqz $a5, .LBB144_1
 ; LA32-NEXT:  # %bb.2:
 ; LA32-NEXT:    srl.w $a0, $a4, $a2
 ; LA32-NEXT:    ret
@@ -1559,7 +4541,7 @@ define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
 ; LA64-NEXT:    addi.w $a1, $a1, 0
 ; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
-; LA64-NEXT:  .LBB48_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:  .LBB144_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a5, $a4, $a1
 ; LA64-NEXT:    nor $a5, $a5, $zero
@@ -1567,7 +4549,7 @@ define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    and $a5, $a5, $a3
 ; LA64-NEXT:    xor $a5, $a4, $a5
 ; LA64-NEXT:    sc.w $a5, $a0, 0
-; LA64-NEXT:    beqz $a5, .LBB48_1
+; LA64-NEXT:    beqz $a5, .LBB144_1
 ; LA64-NEXT:  # %bb.2:
 ; LA64-NEXT:    srl.w $a0, $a4, $a2
 ; LA64-NEXT:    ret
@@ -1585,7 +4567,7 @@ define i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
 ; LA32-NEXT:    sll.w $a1, $a1, $a3
 ; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
-; LA32-NEXT:  .LBB49_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:  .LBB145_1: # =>This Inner Loop Header: Depth=1
 ; LA32-NEXT:    ll.w $a4, $a0, 0
 ; LA32-NEXT:    and $a5, $a4, $a1
 ; LA32-NEXT:    nor $a5, $a5, $zero
@@ -1593,7 +4575,7 @@ define i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; LA32-NEXT:    and $a5, $a5, $a2
 ; LA32-NEXT:    xor $a5, $a4, $a5
 ; LA32-NEXT:    sc.w $a5, $a0, 0
-; LA32-NEXT:    beqz $a5, .LBB49_1
+; LA32-NEXT:    beqz $a5, .LBB145_1
 ; LA32-NEXT:  # %bb.2:
 ; LA32-NEXT:    srl.w $a0, $a4, $a3
 ; LA32-NEXT:    ret
@@ -1609,7 +4591,7 @@ define i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a1, $a1, $a3
 ; LA64-NEXT:    addi.w $a1, $a1, 0
 ; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
-; LA64-NEXT:  .LBB49_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:  .LBB145_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a5, $a4, $a1
 ; LA64-NEXT:    nor $a5, $a5, $zero
@@ -1617,7 +4599,7 @@ define i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    and $a5, $a5, $a2
 ; LA64-NEXT:    xor $a5, $a4, $a5
 ; LA64-NEXT:    sc.w $a5, $a0, 0
-; LA64-NEXT:    beqz $a5, .LBB49_1
+; LA64-NEXT:    beqz $a5, .LBB145_1
 ; LA64-NEXT:  # %bb.2:
 ; LA64-NEXT:    srl.w $a0, $a4, $a3
 ; LA64-NEXT:    ret
@@ -1628,24 +4610,24 @@ define i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind {
 define i32 @atomicrmw_nand_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; LA32-LABEL: atomicrmw_nand_i32_monotonic:
 ; LA32:       # %bb.0:
-; LA32-NEXT:  .LBB50_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:  .LBB146_1: # =>This Inner Loop Header: Depth=1
 ; LA32-NEXT:    ll.w $a2, $a0, 0
 ; LA32-NEXT:    and $a3, $a2, $a1
 ; LA32-NEXT:    nor $a3, $a3, $zero
 ; LA32-NEXT:    sc.w $a3, $a0, 0
-; LA32-NEXT:    beqz $a3, .LBB50_1
+; LA32-NEXT:    beqz $a3, .LBB146_1
 ; LA32-NEXT:  # %bb.2:
 ; LA32-NEXT:    move $a0, $a2
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: atomicrmw_nand_i32_monotonic:
 ; LA64:       # %bb.0:
-; LA64-NEXT:  .LBB50_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:  .LBB146_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a2, $a0, 0
 ; LA64-NEXT:    and $a3, $a2, $a1
 ; LA64-NEXT:    nor $a3, $a3, $zero
 ; LA64-NEXT:    sc.w $a3, $a0, 0
-; LA64-NEXT:    beqz $a3, .LBB50_1
+; LA64-NEXT:    beqz $a3, .LBB146_1
 ; LA64-NEXT:  # %bb.2:
 ; LA64-NEXT:    move $a0, $a2
 ; LA64-NEXT:    ret
@@ -1666,12 +4648,12 @@ define i64 @atomicrmw_nand_i64_monotonic(ptr %a, i64 %b) nounwind {
 ;
 ; LA64-LABEL: atomicrmw_nand_i64_monotonic:
 ; LA64:       # %bb.0:
-; LA64-NEXT:  .LBB51_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:  .LBB147_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.d $a2, $a0, 0
 ; LA64-NEXT:    and $a3, $a2, $a1
 ; LA64-NEXT:    nor $a3, $a3, $zero
 ; LA64-NEXT:    sc.d $a3, $a0, 0
-; LA64-NEXT:    beqz $a3, .LBB51_1
+; LA64-NEXT:    beqz $a3, .LBB147_1
 ; LA64-NEXT:  # %bb.2:
 ; LA64-NEXT:    move $a0, $a2
 ; LA64-NEXT:    ret
@@ -1689,11 +4671,11 @@ define i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; LA32-NEXT:    sll.w $a1, $a1, $a2
 ; LA32-NEXT:    orn $a1, $a1, $a3
 ; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
-; LA32-NEXT:  .LBB52_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:  .LBB148_1: # =>This Inner Loop Header: Depth=1
 ; LA32-NEXT:    ll.w $a3, $a0, 0
 ; LA32-NEXT:    and $a4, $a3, $a1
 ; LA32-NEXT:    sc.w $a4, $a0, 0
-; LA32-NEXT:    beqz $a4, .LBB52_1
+; LA32-NEXT:    beqz $a4, .LBB148_1
 ; LA32-NEXT:  # %bb.2:
 ; LA32-NEXT:    srl.w $a0, $a3, $a2
 ; LA32-NEXT:    ret
@@ -1725,11 +4707,11 @@ define i16 @atomicrmw_and_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; LA32-NEXT:    sll.w $a1, $a1, $a3
 ; LA32-NEXT:    orn $a1, $a1, $a2
 ; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
-; LA32-NEXT:  .LBB53_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:  .LBB149_1: # =>This Inner Loop Header: Depth=1
 ; LA32-NEXT:    ll.w $a2, $a0, 0
 ; LA32-NEXT:    and $a4, $a2, $a1
 ; LA32-NEXT:    sc.w $a4, $a0, 0
-; LA32-NEXT:    beqz $a4, .LBB53_1
+; LA32-NEXT:    beqz $a4, .LBB149_1
 ; LA32-NEXT:  # %bb.2:
 ; LA32-NEXT:    srl.w $a0, $a2, $a3
 ; LA32-NEXT:    ret
@@ -1754,11 +4736,11 @@ define i16 @atomicrmw_and_i16_monotonic(ptr %a, i16 %b) nounwind {
 define i32 @atomicrmw_and_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; LA32-LABEL: atomicrmw_and_i32_monotonic:
 ; LA32:       # %bb.0:
-; LA32-NEXT:  .LBB54_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:  .LBB150_1: # =>This Inner Loop Header: Depth=1
 ; LA32-NEXT:    ll.w $a2, $a0, 0
 ; LA32-NEXT:    and $a3, $a2, $a1
 ; LA32-NEXT:    sc.w $a3, $a0, 0
-; LA32-NEXT:    beqz $a3, .LBB54_1
+; LA32-NEXT:    beqz $a3, .LBB150_1
 ; LA32-NEXT:  # %bb.2:
 ; LA32-NEXT:    move $a0, $a2
 ; LA32-NEXT:    ret
@@ -1799,11 +4781,11 @@ define i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; LA32-NEXT:    slli.w $a2, $a0, 3
 ; LA32-NEXT:    sll.w $a1, $a1, $a2
 ; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
-; LA32-NEXT:  .LBB56_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:  .LBB152_1: # =>This Inner Loop Header: Depth=1
 ; LA32-NEXT:    ll.w $a3, $a0, 0
 ; LA32-NEXT:    or $a4, $a3, $a1
 ; LA32-NEXT:    sc.w $a4, $a0, 0
-; LA32-NEXT:    beqz $a4, .LBB56_1
+; LA32-NEXT:    beqz $a4, .LBB152_1
 ; LA32-NEXT:  # %bb.2:
 ; LA32-NEXT:    srl.w $a0, $a3, $a2
 ; LA32-NEXT:    ret
@@ -1828,11 +4810,11 @@ define i16 @atomicrmw_or_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; LA32-NEXT:    slli.w $a2, $a0, 3
 ; LA32-NEXT:    sll.w $a1, $a1, $a2
 ; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
-; LA32-NEXT:  .LBB57_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:  .LBB153_1: # =>This Inner Loop Header: Depth=1
 ; LA32-NEXT:    ll.w $a3, $a0, 0
 ; LA32-NEXT:    or $a4, $a3, $a1
 ; LA32-NEXT:    sc.w $a4, $a0, 0
-; LA32-NEXT:    beqz $a4, .LBB57_1
+; LA32-NEXT:    beqz $a4, .LBB153_1
 ; LA32-NEXT:  # %bb.2:
 ; LA32-NEXT:    srl.w $a0, $a3, $a2
 ; LA32-NEXT:    ret
@@ -1853,11 +4835,11 @@ define i16 @atomicrmw_or_i16_monotonic(ptr %a, i16 %b) nounwind {
 define i32 @atomicrmw_or_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; LA32-LABEL: atomicrmw_or_i32_monotonic:
 ; LA32:       # %bb.0:
-; LA32-NEXT:  .LBB58_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:  .LBB154_1: # =>This Inner Loop Header: Depth=1
 ; LA32-NEXT:    ll.w $a2, $a0, 0
 ; LA32-NEXT:    or $a3, $a2, $a1
 ; LA32-NEXT:    sc.w $a3, $a0, 0
-; LA32-NEXT:    beqz $a3, .LBB58_1
+; LA32-NEXT:    beqz $a3, .LBB154_1
 ; LA32-NEXT:  # %bb.2:
 ; LA32-NEXT:    move $a0, $a2
 ; LA32-NEXT:    ret
@@ -1898,11 +4880,11 @@ define i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; LA32-NEXT:    slli.w $a2, $a0, 3
 ; LA32-NEXT:    sll.w $a1, $a1, $a2
 ; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
-; LA32-NEXT:  .LBB60_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:  .LBB156_1: # =>This Inner Loop Header: Depth=1
 ; LA32-NEXT:    ll.w $a3, $a0, 0
 ; LA32-NEXT:    xor $a4, $a3, $a1
 ; LA32-NEXT:    sc.w $a4, $a0, 0
-; LA32-NEXT:    beqz $a4, .LBB60_1
+; LA32-NEXT:    beqz $a4, .LBB156_1
 ; LA32-NEXT:  # %bb.2:
 ; LA32-NEXT:    srl.w $a0, $a3, $a2
 ; LA32-NEXT:    ret
@@ -1927,11 +4909,11 @@ define i16 @atomicrmw_xor_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; LA32-NEXT:    slli.w $a2, $a0, 3
 ; LA32-NEXT:    sll.w $a1, $a1, $a2
 ; LA32-NEXT:    bstrins.w $a0, $zero, 1, 0
-; LA32-NEXT:  .LBB61_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:  .LBB157_1: # =>This Inner Loop Header: Depth=1
 ; LA32-NEXT:    ll.w $a3, $a0, 0
 ; LA32-NEXT:    xor $a4, $a3, $a1
 ; LA32-NEXT:    sc.w $a4, $a0, 0
-; LA32-NEXT:    beqz $a4, .LBB61_1
+; LA32-NEXT:    beqz $a4, .LBB157_1
 ; LA32-NEXT:  # %bb.2:
 ; LA32-NEXT:    srl.w $a0, $a3, $a2
 ; LA32-NEXT:    ret
@@ -1952,11 +4934,11 @@ define i16 @atomicrmw_xor_i16_monotonic(ptr %a, i16 %b) nounwind {
 define i32 @atomicrmw_xor_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; LA32-LABEL: atomicrmw_xor_i32_monotonic:
 ; LA32:       # %bb.0:
-; LA32-NEXT:  .LBB62_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:  .LBB158_1: # =>This Inner Loop Header: Depth=1
 ; LA32-NEXT:    ll.w $a2, $a0, 0
 ; LA32-NEXT:    xor $a3, $a2, $a1
 ; LA32-NEXT:    sc.w $a3, $a0, 0
-; LA32-NEXT:    beqz $a3, .LBB62_1
+; LA32-NEXT:    beqz $a3, .LBB158_1
 ; LA32-NEXT:  # %bb.2:
 ; LA32-NEXT:    move $a0, $a2
 ; LA32-NEXT:    ret

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/fence-singlethread.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/fence-singlethread.ll
new file mode 100644
index 000000000000000..8d6056bc7677f9f
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/fence-singlethread.ll
@@ -0,0 +1,17 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
+
+define void @fence_singlethread() {
+; LA32-LABEL: fence_singlethread:
+; LA32:       # %bb.0:
+; LA32-NEXT:    dbar 0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fence_singlethread:
+; LA64:       # %bb.0:
+; LA64-NEXT:    dbar 0
+; LA64-NEXT:    ret
+  fence syncscope("singlethread") seq_cst
+  ret void
+}

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll
index e91d0c145eab6e1..deff11723d2729c 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll
@@ -72,6 +72,202 @@ define i64 @load_acquire_i64(ptr %ptr) {
   ret i64 %val
 }
 
+define i8 @load_unordered_i8(ptr %ptr) {
+; LA32-LABEL: load_unordered_i8:
+; LA32:       # %bb.0:
+; LA32-NEXT:    ld.b $a0, $a0, 0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: load_unordered_i8:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ld.b $a0, $a0, 0
+; LA64-NEXT:    ret
+  %val = load atomic i8, ptr %ptr unordered, align 1
+  ret i8 %val
+}
+
+define i16 @load_unordered_i16(ptr %ptr) {
+; LA32-LABEL: load_unordered_i16:
+; LA32:       # %bb.0:
+; LA32-NEXT:    ld.h $a0, $a0, 0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: load_unordered_i16:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ld.h $a0, $a0, 0
+; LA64-NEXT:    ret
+  %val = load atomic i16, ptr %ptr unordered, align 2
+  ret i16 %val
+}
+
+define i32 @load_unordered_i32(ptr %ptr) {
+; LA32-LABEL: load_unordered_i32:
+; LA32:       # %bb.0:
+; LA32-NEXT:    ld.w $a0, $a0, 0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: load_unordered_i32:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ld.w $a0, $a0, 0
+; LA64-NEXT:    ret
+  %val = load atomic i32, ptr %ptr unordered, align 4
+  ret i32 %val
+}
+
+define i64 @load_unordered_i64(ptr %ptr) {
+; LA32-LABEL: load_unordered_i64:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    .cfi_def_cfa_offset 16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    .cfi_offset 1, -4
+; LA32-NEXT:    move $a1, $zero
+; LA32-NEXT:    bl %plt(__atomic_load_8)
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: load_unordered_i64:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ld.d $a0, $a0, 0
+; LA64-NEXT:    ret
+  %val = load atomic i64, ptr %ptr unordered, align 8
+  ret i64 %val
+}
+
+define i8 @load_monotonic_i8(ptr %ptr) {
+; LA32-LABEL: load_monotonic_i8:
+; LA32:       # %bb.0:
+; LA32-NEXT:    ld.b $a0, $a0, 0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: load_monotonic_i8:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ld.b $a0, $a0, 0
+; LA64-NEXT:    ret
+  %val = load atomic i8, ptr %ptr monotonic, align 1
+  ret i8 %val
+}
+
+define i16 @load_monotonic_i16(ptr %ptr) {
+; LA32-LABEL: load_monotonic_i16:
+; LA32:       # %bb.0:
+; LA32-NEXT:    ld.h $a0, $a0, 0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: load_monotonic_i16:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ld.h $a0, $a0, 0
+; LA64-NEXT:    ret
+  %val = load atomic i16, ptr %ptr monotonic, align 2
+  ret i16 %val
+}
+
+define i32 @load_monotonic_i32(ptr %ptr) {
+; LA32-LABEL: load_monotonic_i32:
+; LA32:       # %bb.0:
+; LA32-NEXT:    ld.w $a0, $a0, 0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: load_monotonic_i32:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ld.w $a0, $a0, 0
+; LA64-NEXT:    ret
+  %val = load atomic i32, ptr %ptr monotonic, align 4
+  ret i32 %val
+}
+
+define i64 @load_monotonic_i64(ptr %ptr) {
+; LA32-LABEL: load_monotonic_i64:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    .cfi_def_cfa_offset 16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    .cfi_offset 1, -4
+; LA32-NEXT:    move $a1, $zero
+; LA32-NEXT:    bl %plt(__atomic_load_8)
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: load_monotonic_i64:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ld.d $a0, $a0, 0
+; LA64-NEXT:    ret
+  %val = load atomic i64, ptr %ptr monotonic, align 8
+  ret i64 %val
+}
+
+define i8 @load_seq_cst_i8(ptr %ptr) {
+; LA32-LABEL: load_seq_cst_i8:
+; LA32:       # %bb.0:
+; LA32-NEXT:    ld.b $a0, $a0, 0
+; LA32-NEXT:    dbar 0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: load_seq_cst_i8:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ld.b $a0, $a0, 0
+; LA64-NEXT:    dbar 0
+; LA64-NEXT:    ret
+  %val = load atomic i8, ptr %ptr seq_cst, align 1
+  ret i8 %val
+}
+
+define i16 @load_seq_cst_i16(ptr %ptr) {
+; LA32-LABEL: load_seq_cst_i16:
+; LA32:       # %bb.0:
+; LA32-NEXT:    ld.h $a0, $a0, 0
+; LA32-NEXT:    dbar 0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: load_seq_cst_i16:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ld.h $a0, $a0, 0
+; LA64-NEXT:    dbar 0
+; LA64-NEXT:    ret
+  %val = load atomic i16, ptr %ptr seq_cst, align 2
+  ret i16 %val
+}
+
+define i32 @load_seq_cst_i32(ptr %ptr) {
+; LA32-LABEL: load_seq_cst_i32:
+; LA32:       # %bb.0:
+; LA32-NEXT:    ld.w $a0, $a0, 0
+; LA32-NEXT:    dbar 0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: load_seq_cst_i32:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ld.w $a0, $a0, 0
+; LA64-NEXT:    dbar 0
+; LA64-NEXT:    ret
+  %val = load atomic i32, ptr %ptr seq_cst, align 4
+  ret i32 %val
+}
+
+define i64 @load_seq_cst_i64(ptr %ptr) {
+; LA32-LABEL: load_seq_cst_i64:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    .cfi_def_cfa_offset 16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    .cfi_offset 1, -4
+; LA32-NEXT:    ori $a1, $zero, 5
+; LA32-NEXT:    bl %plt(__atomic_load_8)
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: load_seq_cst_i64:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ld.d $a0, $a0, 0
+; LA64-NEXT:    dbar 0
+; LA64-NEXT:    ret
+  %val = load atomic i64, ptr %ptr seq_cst, align 8
+  ret i64 %val
+}
+
 define void @store_release_i8(ptr %ptr, i8 signext %v) {
 ; LA32-LABEL: store_release_i8:
 ; LA32:       # %bb.0:
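
A note on the i64 load tests above: LA32 has no native 64-bit atomic load, so
the compiler falls back to a libatomic call, passing the memory-order constant
in $a1 (0 for unordered/monotonic via `move $a1, $zero`, 5 for seq_cst via
`ori $a1, $zero, 5`), while LA64 can use a plain `ld.d`. A rough sketch of the
runtime interface being assumed, following the usual GCC/compiler-rt libatomic
convention (the declaration is illustrative and is not part of this patch):

  #include <stdint.h>

  /* 8-byte atomic load entry point; the second argument is the __ATOMIC_*
     memory-order value (0 = relaxed, 5 = seq_cst), which is what the LA32
     code above materializes into $a1 before the call. */
  uint64_t __atomic_load_8(const volatile void *ptr, int memorder);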


        

