[llvm] [LoongArch] Add codegen support for atomic-ops on LA32 (PR #141557)

via llvm-commits llvm-commits at lists.llvm.org
Tue May 27 00:46:40 PDT 2025


https://github.com/heiher created https://github.com/llvm/llvm-project/pull/141557

This patch adds codegen support for the atomic operations `cmpxchg`, `max`, `min`, `umax`, and `umin` on the LA32 target.
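
For context, a minimal IR sketch of the kinds of operations this enables on `loongarch32`; the function names are illustrative, and the updated `atomicrmw-minmax.ll` checks below show the exact lowering. These previously had no native LA32 lowering (cf. the removed `// TODO: Support cmpxchg on LA32.`).

```llvm
; Hypothetical examples; sub-word min/max RMW expands via the new masked
; ll.w/sc.w pseudo expansion, and i32 cmpxchg now selects PseudoCmpXchg32
; on LA32 as well (see the test diff below for the generated code).
define i8 @example_umax_i8(ptr %p, i8 %v) nounwind {
  ; i8/i16 min/max are done on the containing aligned word under a mask
  %old = atomicrmw umax ptr %p, i8 %v acquire
  ret i8 %old
}

define i32 @example_cmpxchg_i32(ptr %p, i32 %cmp, i32 %new) nounwind {
  ; word-sized compare-and-swap, lowered to an ll.w/sc.w retry loop
  %pair = cmpxchg ptr %p, i32 %cmp, i32 %new acq_rel acquire
  %old = extractvalue { i32, i1 } %pair, 0
  ret i32 %old
}
```

Such IR can be checked with `llc --mtriple=loongarch32 -mattr=+d`, matching the new RUN line added to the test.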

>From f5c5cb52a65ed7caab41752a102f326b3105cfdc Mon Sep 17 00:00:00 2001
From: WANG Rui <wangrui at loongson.cn>
Date: Fri, 23 May 2025 15:23:14 +0800
Subject: [PATCH] [LoongArch] Add codegen support for atomic-ops on LA32

This patch adds codegen support for atomic operations `cmpxchg`, `max`,
`min`, `umax` and `umin` on the LA32 target.
---
 llvm/include/llvm/IR/IntrinsicsLoongArch.td   |    7 +-
 .../LoongArchExpandAtomicPseudoInsts.cpp      |   89 +-
 .../LoongArch/LoongArchISelLowering.cpp       |   25 +-
 .../Target/LoongArch/LoongArchInstrInfo.td    |   57 +-
 .../ir-instruction/atomicrmw-minmax.ll        | 2622 +++++++++++++++++
 5 files changed, 2740 insertions(+), 60 deletions(-)

diff --git a/llvm/include/llvm/IR/IntrinsicsLoongArch.td b/llvm/include/llvm/IR/IntrinsicsLoongArch.td
index 4621f1689b46e..f5e3d412666a2 100644
--- a/llvm/include/llvm/IR/IntrinsicsLoongArch.td
+++ b/llvm/include/llvm/IR/IntrinsicsLoongArch.td
@@ -25,12 +25,13 @@ class MaskedAtomicRMW<LLVMType itype>
 multiclass MaskedAtomicRMWIntrinsics {
   // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32 imm);
   def _i32 : MaskedAtomicRMW<llvm_i32_ty>;
-  // i64 @llvm.<name>.i32.<p>(any*, i64, i64, i64 imm);
+  // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64 imm);
   def _i64 : MaskedAtomicRMW<llvm_i64_ty>;
 }
 
 multiclass MaskedAtomicRMWFiveOpIntrinsics {
-  // TODO: Support cmpxchg on LA32.
+  // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32, i32 imm);
+  def _i32 : MaskedAtomicRMWFiveArg<llvm_i32_ty>;
   // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64, i64 imm);
   def _i64 : MaskedAtomicRMWFiveArg<llvm_i64_ty>;
 }
@@ -44,7 +45,7 @@ defm int_loongarch_masked_atomicrmw_umin : MaskedAtomicRMWIntrinsics;
 defm int_loongarch_masked_atomicrmw_max : MaskedAtomicRMWFiveOpIntrinsics;
 defm int_loongarch_masked_atomicrmw_min : MaskedAtomicRMWFiveOpIntrinsics;
 
-// @llvm.loongarch.masked.cmpxchg.i64.<p>(
+// @llvm.loongarch.masked.cmpxchg.<i32,i64>.<p>(
 //   ptr addr, grlen cmpval, grlen newval, grlen mask, grlenimm ordering)
 defm int_loongarch_masked_cmpxchg : MaskedAtomicRMWFiveOpIntrinsics;
 
diff --git a/llvm/lib/Target/LoongArch/LoongArchExpandAtomicPseudoInsts.cpp b/llvm/lib/Target/LoongArch/LoongArchExpandAtomicPseudoInsts.cpp
index 3be012feb2385..73874fccc0308 100644
--- a/llvm/lib/Target/LoongArch/LoongArchExpandAtomicPseudoInsts.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchExpandAtomicPseudoInsts.cpp
@@ -122,6 +122,18 @@ bool LoongArchExpandAtomicPseudo::expandMI(
   case LoongArch::PseudoAtomicLoadXor32:
     return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Xor, false, 32,
                              NextMBBI);
+  case LoongArch::PseudoAtomicLoadUMax32:
+    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMax, false, 32,
+                                NextMBBI);
+  case LoongArch::PseudoAtomicLoadUMin32:
+    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMin, false, 32,
+                                NextMBBI);
+  case LoongArch::PseudoAtomicLoadMax32:
+    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Max, false, 32,
+                                NextMBBI);
+  case LoongArch::PseudoAtomicLoadMin32:
+    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Min, false, 32,
+                                NextMBBI);
   case LoongArch::PseudoMaskedAtomicLoadUMax32:
     return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMax, true, 32,
                                 NextMBBI);
@@ -356,8 +368,6 @@ bool LoongArchExpandAtomicPseudo::expandAtomicMinMaxOp(
     MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
     AtomicRMWInst::BinOp BinOp, bool IsMasked, int Width,
     MachineBasicBlock::iterator &NextMBBI) {
-  assert(IsMasked == true &&
-         "Should only need to expand masked atomic max/min");
   assert(Width == 32 && "Should never need to expand masked 64-bit operations");
 
   MachineInstr &MI = *MBBI;
@@ -385,79 +395,92 @@ bool LoongArchExpandAtomicPseudo::expandAtomicMinMaxOp(
   MBB.addSuccessor(LoopHeadMBB);
 
   Register DestReg = MI.getOperand(0).getReg();
-  Register Scratch1Reg = MI.getOperand(1).getReg();
-  Register Scratch2Reg = MI.getOperand(2).getReg();
-  Register AddrReg = MI.getOperand(3).getReg();
-  Register IncrReg = MI.getOperand(4).getReg();
-  Register MaskReg = MI.getOperand(5).getReg();
+  Register ScratchReg = MI.getOperand(1).getReg();
+  Register AddrReg = MI.getOperand(IsMasked ? 3 : 2).getReg();
+  Register IncrReg = MI.getOperand(IsMasked ? 4 : 3).getReg();
+  Register CmprReg = DestReg;
 
   //
   // .loophead:
   //   ll.w destreg, (alignedaddr)
-  //   and scratch2, destreg, mask
-  //   move scratch1, destreg
   BuildMI(LoopHeadMBB, DL, TII->get(LoongArch::LL_W), DestReg)
       .addReg(AddrReg)
       .addImm(0);
-  BuildMI(LoopHeadMBB, DL, TII->get(LoongArch::AND), Scratch2Reg)
-      .addReg(DestReg)
-      .addReg(MaskReg);
-  BuildMI(LoopHeadMBB, DL, TII->get(LoongArch::OR), Scratch1Reg)
+  //   and cmpr, destreg, mask
+  if (IsMasked) {
+    Register MaskReg = MI.getOperand(5).getReg();
+    CmprReg = MI.getOperand(2).getReg();
+    BuildMI(LoopHeadMBB, DL, TII->get(LoongArch::AND), CmprReg)
+        .addReg(DestReg)
+        .addReg(MaskReg);
+  }
+  //   move scratch, destreg
+  BuildMI(LoopHeadMBB, DL, TII->get(LoongArch::OR), ScratchReg)
       .addReg(DestReg)
       .addReg(LoongArch::R0);
 
   switch (BinOp) {
   default:
     llvm_unreachable("Unexpected AtomicRMW BinOp");
-  // bgeu scratch2, incr, .looptail
+  // bgeu cmpr, incr, .looptail
   case AtomicRMWInst::UMax:
     BuildMI(LoopHeadMBB, DL, TII->get(LoongArch::BGEU))
-        .addReg(Scratch2Reg)
+        .addReg(CmprReg)
         .addReg(IncrReg)
         .addMBB(LoopTailMBB);
     break;
-  // bgeu incr, scratch2, .looptail
+  // bgeu incr, cmpr, .looptail
   case AtomicRMWInst::UMin:
     BuildMI(LoopHeadMBB, DL, TII->get(LoongArch::BGEU))
         .addReg(IncrReg)
-        .addReg(Scratch2Reg)
+        .addReg(CmprReg)
         .addMBB(LoopTailMBB);
     break;
   case AtomicRMWInst::Max:
-    insertSext(TII, DL, LoopHeadMBB, Scratch2Reg, MI.getOperand(6).getReg());
-    // bge scratch2, incr, .looptail
+    if (IsMasked)
+      insertSext(TII, DL, LoopHeadMBB, CmprReg, MI.getOperand(6).getReg());
+    // bge cmpr, incr, .looptail
     BuildMI(LoopHeadMBB, DL, TII->get(LoongArch::BGE))
-        .addReg(Scratch2Reg)
+        .addReg(CmprReg)
         .addReg(IncrReg)
         .addMBB(LoopTailMBB);
     break;
   case AtomicRMWInst::Min:
-    insertSext(TII, DL, LoopHeadMBB, Scratch2Reg, MI.getOperand(6).getReg());
-    // bge incr, scratch2, .looptail
+    if (IsMasked)
+      insertSext(TII, DL, LoopHeadMBB, CmprReg, MI.getOperand(6).getReg());
+    // bge incr, cmpr, .looptail
     BuildMI(LoopHeadMBB, DL, TII->get(LoongArch::BGE))
         .addReg(IncrReg)
-        .addReg(Scratch2Reg)
+        .addReg(CmprReg)
         .addMBB(LoopTailMBB);
     break;
     // TODO: support other AtomicRMWInst.
   }
 
   // .loopifbody:
-  //   xor scratch1, destreg, incr
-  //   and scratch1, scratch1, mask
-  //   xor scratch1, destreg, scratch1
-  insertMaskedMerge(TII, DL, LoopIfBodyMBB, Scratch1Reg, DestReg, IncrReg,
-                    MaskReg, Scratch1Reg);
+  if (IsMasked) {
+    Register MaskReg = MI.getOperand(5).getReg();
+    // xor scratch, destreg, incr
+    // and scratch, scratch, mask
+    // xor scratch, destreg, scratch
+    insertMaskedMerge(TII, DL, LoopIfBodyMBB, ScratchReg, DestReg, IncrReg,
+                      MaskReg, ScratchReg);
+  } else {
+    // move scratch, incr
+    BuildMI(LoopIfBodyMBB, DL, TII->get(LoongArch::OR), ScratchReg)
+        .addReg(IncrReg)
+        .addReg(LoongArch::R0);
+  }
 
   // .looptail:
-  //   sc.w scratch1, scratch1, (addr)
-  //   beqz scratch1, loop
-  BuildMI(LoopTailMBB, DL, TII->get(LoongArch::SC_W), Scratch1Reg)
-      .addReg(Scratch1Reg)
+  //   sc.w scratch, scratch, (addr)
+  //   beqz scratch, loop
+  BuildMI(LoopTailMBB, DL, TII->get(LoongArch::SC_W), ScratchReg)
+      .addReg(ScratchReg)
       .addReg(AddrReg)
       .addImm(0);
   BuildMI(LoopTailMBB, DL, TII->get(LoongArch::BEQ))
-      .addReg(Scratch1Reg)
+      .addReg(ScratchReg)
       .addReg(LoongArch::R0)
       .addMBB(LoopHeadMBB);
 
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 9f5c94ddea44f..c96be139340f3 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -7070,6 +7070,14 @@ getIntrinsicForMaskedAtomicRMWBinOp(unsigned GRLen,
       return Intrinsic::loongarch_masked_atomicrmw_sub_i32;
     case AtomicRMWInst::Nand:
       return Intrinsic::loongarch_masked_atomicrmw_nand_i32;
+    case AtomicRMWInst::UMax:
+      return Intrinsic::loongarch_masked_atomicrmw_umax_i32;
+    case AtomicRMWInst::UMin:
+      return Intrinsic::loongarch_masked_atomicrmw_umin_i32;
+    case AtomicRMWInst::Max:
+      return Intrinsic::loongarch_masked_atomicrmw_max_i32;
+    case AtomicRMWInst::Min:
+      return Intrinsic::loongarch_masked_atomicrmw_min_i32;
       // TODO: support other AtomicRMWInst.
     }
   }
@@ -7093,19 +7101,22 @@ LoongArchTargetLowering::shouldExpandAtomicCmpXchgInIR(
 Value *LoongArchTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
     IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
+  unsigned GRLen = Subtarget.getGRLen();
   AtomicOrdering FailOrd = CI->getFailureOrdering();
   Value *FailureOrdering =
       Builder.getIntN(Subtarget.getGRLen(), static_cast<uint64_t>(FailOrd));
-
-  // TODO: Support cmpxchg on LA32.
-  Intrinsic::ID CmpXchgIntrID = Intrinsic::loongarch_masked_cmpxchg_i64;
-  CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
-  NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
-  Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
+  Intrinsic::ID CmpXchgIntrID = Intrinsic::loongarch_masked_cmpxchg_i32;
+  if (GRLen == 64) {
+    CmpXchgIntrID = Intrinsic::loongarch_masked_cmpxchg_i64;
+    CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
+    NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
+    Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
+  }
   Type *Tys[] = {AlignedAddr->getType()};
   Value *Result = Builder.CreateIntrinsic(
       CmpXchgIntrID, Tys, {AlignedAddr, CmpVal, NewVal, Mask, FailureOrdering});
-  Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
+  if (GRLen == 64)
+    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
   return Result;
 }
 
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
index fcdd9a130d8b6..344f563bd61e8 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -2055,6 +2055,10 @@ def PseudoAtomicLoadSub32 : PseudoAM;
 def PseudoAtomicLoadAnd32 : PseudoAM;
 def PseudoAtomicLoadOr32 : PseudoAM;
 def PseudoAtomicLoadXor32 : PseudoAM;
+def PseudoAtomicLoadUMax32 : PseudoAM;
+def PseudoAtomicLoadUMin32 : PseudoAM;
+def PseudoAtomicLoadMax32 : PseudoAM;
+def PseudoAtomicLoadMin32 : PseudoAM;
 
 multiclass PseudoBinPat<string Op, Pseudo BinInst> {
   def : Pat<(!cast<PatFrag>(Op#"_monotonic") GPR:$addr, GPR:$incr),
@@ -2253,6 +2257,22 @@ def : Pat<(atomic_cmp_swap_i64 GPR:$addr, GPR:$cmp, GPR:$new),
           (AMCAS__DB_D GPR:$cmp, GPR:$new, GPR:$addr)>;
 }
 
+// Ordering constants must be kept in sync with the AtomicOrdering enum in
+// AtomicOrdering.h.
+multiclass PseudoCmpXchgPat<string Op, Pseudo CmpXchgInst,
+                            ValueType vt = GRLenVT> {
+  def : Pat<(vt (!cast<PatFrag>(Op#"_failure_monotonic") GPR:$addr, GPR:$cmp, GPR:$new)),
+            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 2)>;
+  def : Pat<(vt (!cast<PatFrag>(Op#"_failure_acquire") GPR:$addr, GPR:$cmp, GPR:$new)),
+            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 4)>;
+  def : Pat<(vt (!cast<PatFrag>(Op#"_failure_release") GPR:$addr, GPR:$cmp, GPR:$new)),
+            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 5)>;
+  def : Pat<(vt (!cast<PatFrag>(Op#"_failure_acq_rel") GPR:$addr, GPR:$cmp, GPR:$new)),
+            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 6)>;
+  def : Pat<(vt (!cast<PatFrag>(Op#"_failure_seq_cst") GPR:$addr, GPR:$cmp, GPR:$new)),
+            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 7)>;
+}
+
 let Predicates = [IsLA64] in {
 defm : binary_atomic_op_wd<"AMSWAP", "atomic_swap">;
 defm : binary_atomic_op_wd<"AMADD", "atomic_load_add">;
@@ -2288,23 +2308,6 @@ def : AtomicPat<int_loongarch_masked_atomicrmw_umax_i64,
 def : AtomicPat<int_loongarch_masked_atomicrmw_umin_i64,
                 PseudoMaskedAtomicLoadUMin32>;
 
-// Ordering constants must be kept in sync with the AtomicOrdering enum in
-// AtomicOrdering.h.
-multiclass PseudoCmpXchgPat<string Op, Pseudo CmpXchgInst,
-                            ValueType vt = GRLenVT> {
-  def : Pat<(vt (!cast<PatFrag>(Op#"_failure_monotonic") GPR:$addr, GPR:$cmp, GPR:$new)),
-            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 2)>;
-  def : Pat<(vt (!cast<PatFrag>(Op#"_failure_acquire") GPR:$addr, GPR:$cmp, GPR:$new)),
-            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 4)>;
-  def : Pat<(vt (!cast<PatFrag>(Op#"_failure_release") GPR:$addr, GPR:$cmp, GPR:$new)),
-            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 5)>;
-  def : Pat<(vt (!cast<PatFrag>(Op#"_failure_acq_rel") GPR:$addr, GPR:$cmp, GPR:$new)),
-            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 6)>;
-  def : Pat<(vt (!cast<PatFrag>(Op#"_failure_seq_cst") GPR:$addr, GPR:$cmp, GPR:$new)),
-            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 7)>;
-}
-
-defm : PseudoCmpXchgPat<"atomic_cmp_swap_i32", PseudoCmpXchg32>;
 defm : PseudoCmpXchgPat<"atomic_cmp_swap_i64", PseudoCmpXchg64, i64>;
 def : Pat<(int_loongarch_masked_cmpxchg_i64
             GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$fail_order),
@@ -2317,6 +2320,7 @@ def : PseudoMaskedAMMinMaxPat<int_loongarch_masked_atomicrmw_min_i64,
                               PseudoMaskedAtomicLoadMin32>;
 } // Predicates = [IsLA64]
 
+defm : PseudoCmpXchgPat<"atomic_cmp_swap_i32", PseudoCmpXchg32>;
 defm : PseudoBinPat<"atomic_load_nand_i32", PseudoAtomicLoadNand32>;
 
 let Predicates = [IsLA32] in {
@@ -2329,11 +2333,30 @@ def : AtomicPat<int_loongarch_masked_atomicrmw_sub_i32,
                 PseudoMaskedAtomicLoadSub32>;
 def : AtomicPat<int_loongarch_masked_atomicrmw_nand_i32,
                 PseudoMaskedAtomicLoadNand32>;
+def : AtomicPat<int_loongarch_masked_atomicrmw_umax_i32,
+                PseudoMaskedAtomicLoadUMax32>;
+def : AtomicPat<int_loongarch_masked_atomicrmw_umin_i32,
+                PseudoMaskedAtomicLoadUMin32>;
+
+def : PseudoMaskedAMMinMaxPat<int_loongarch_masked_atomicrmw_max_i32,
+                              PseudoMaskedAtomicLoadMax32>;
+def : PseudoMaskedAMMinMaxPat<int_loongarch_masked_atomicrmw_min_i32,
+                              PseudoMaskedAtomicLoadMin32>;
+
+def : Pat<(int_loongarch_masked_cmpxchg_i32
+            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$fail_order),
+          (PseudoMaskedCmpXchg32
+            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$fail_order)>;
+
 defm : PseudoBinPat<"atomic_load_add_i32", PseudoAtomicLoadAdd32>;
 defm : PseudoBinPat<"atomic_load_sub_i32", PseudoAtomicLoadSub32>;
 defm : PseudoBinPat<"atomic_load_and_i32", PseudoAtomicLoadAnd32>;
 defm : PseudoBinPat<"atomic_load_or_i32", PseudoAtomicLoadOr32>;
 defm : PseudoBinPat<"atomic_load_xor_i32", PseudoAtomicLoadXor32>;
+defm : PseudoBinPat<"atomic_load_umax_i32", PseudoAtomicLoadUMax32>;
+defm : PseudoBinPat<"atomic_load_umin_i32", PseudoAtomicLoadUMin32>;
+defm : PseudoBinPat<"atomic_load_max_i32", PseudoAtomicLoadMax32>;
+defm : PseudoBinPat<"atomic_load_min_i32", PseudoAtomicLoadMin32>;
 } // Predicates = [IsLA32]
 
 /// Intrinsics
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-minmax.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-minmax.ll
index 096c2242661c0..a6a0f15f9f4a5 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-minmax.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-minmax.ll
@@ -1,10 +1,37 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 -mattr=+d --verify-machineinstrs < %s | \
+; RUN:   FileCheck %s --check-prefix=LA32
 ; RUN: llc --mtriple=loongarch64 -mattr=+d --verify-machineinstrs < %s | \
 ; RUN:   FileCheck %s --check-prefix=LA64
 
 ;; TODO: Testing for LA32 architecture will be added later
 
 define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_umax_i8_acquire:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    ori $a3, $zero, 255
+; LA32-NEXT:    sll.w $a3, $a3, $a0
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:  .LBB0_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a2, 0
+; LA32-NEXT:    and $a6, $a4, $a3
+; LA32-NEXT:    move $a5, $a4
+; LA32-NEXT:    bgeu $a6, $a1, .LBB0_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB0_1 Depth=1
+; LA32-NEXT:    xor $a5, $a4, $a1
+; LA32-NEXT:    and $a5, $a5, $a3
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:  .LBB0_3: # in Loop: Header=BB0_1 Depth=1
+; LA32-NEXT:    sc.w $a5, $a2, 0
+; LA32-NEXT:    beq $a5, $zero, .LBB0_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a4, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umax_i8_acquire:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -33,6 +60,32 @@ define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
 }
 
 define i16 @atomicrmw_umax_i16_acquire(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_umax_i16_acquire:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    lu12i.w $a3, 15
+; LA32-NEXT:    ori $a3, $a3, 4095
+; LA32-NEXT:    sll.w $a4, $a3, $a0
+; LA32-NEXT:    and $a1, $a1, $a3
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:  .LBB1_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a2, 0
+; LA32-NEXT:    and $a6, $a3, $a4
+; LA32-NEXT:    move $a5, $a3
+; LA32-NEXT:    bgeu $a6, $a1, .LBB1_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB1_1 Depth=1
+; LA32-NEXT:    xor $a5, $a3, $a1
+; LA32-NEXT:    and $a5, $a5, $a4
+; LA32-NEXT:    xor $a5, $a3, $a5
+; LA32-NEXT:  .LBB1_3: # in Loop: Header=BB1_1 Depth=1
+; LA32-NEXT:    sc.w $a5, $a2, 0
+; LA32-NEXT:    beq $a5, $zero, .LBB1_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a3, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umax_i16_acquire:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -62,6 +115,21 @@ define i16 @atomicrmw_umax_i16_acquire(ptr %a, i16 %b) nounwind {
 }
 
 define i32 @atomicrmw_umax_i32_acquire(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_umax_i32_acquire:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB2_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    move $a3, $a2
+; LA32-NEXT:    bgeu $a2, $a1, .LBB2_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB2_1 Depth=1
+; LA32-NEXT:    move $a3, $a1
+; LA32-NEXT:  .LBB2_3: # in Loop: Header=BB2_1 Depth=1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beq $a3, $zero, .LBB2_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umax_i32_acquire:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammax_db.wu $a2, $a1, $a0
@@ -72,6 +140,65 @@ define i32 @atomicrmw_umax_i32_acquire(ptr %a, i32 %b) nounwind {
 }
 
 define i64 @atomicrmw_umax_i64_acquire(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_umax_i64_acquire:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 24 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s0, $sp, 20 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s1, $sp, 16 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s2, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    move $fp, $a0
+; LA32-NEXT:    ld.w $a5, $a0, 4
+; LA32-NEXT:    ld.w $a4, $a0, 0
+; LA32-NEXT:    move $s0, $a2
+; LA32-NEXT:    move $s1, $a1
+; LA32-NEXT:    addi.w $s2, $sp, 0
+; LA32-NEXT:    b .LBB3_2
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB3_1: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB3_2 Depth=1
+; LA32-NEXT:    st.w $a4, $sp, 0
+; LA32-NEXT:    st.w $a5, $sp, 4
+; LA32-NEXT:    ori $a4, $zero, 2
+; LA32-NEXT:    ori $a5, $zero, 2
+; LA32-NEXT:    move $a0, $fp
+; LA32-NEXT:    move $a1, $s2
+; LA32-NEXT:    bl __atomic_compare_exchange_8
+; LA32-NEXT:    ld.w $a5, $sp, 4
+; LA32-NEXT:    ld.w $a4, $sp, 0
+; LA32-NEXT:    bne $a0, $zero, .LBB3_7
+; LA32-NEXT:  .LBB3_2: # %atomicrmw.start
+; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    beq $a5, $s0, .LBB3_4
+; LA32-NEXT:  # %bb.3: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB3_2 Depth=1
+; LA32-NEXT:    sltu $a0, $s0, $a5
+; LA32-NEXT:    b .LBB3_5
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB3_4: # in Loop: Header=BB3_2 Depth=1
+; LA32-NEXT:    sltu $a0, $s1, $a4
+; LA32-NEXT:  .LBB3_5: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB3_2 Depth=1
+; LA32-NEXT:    move $a2, $a4
+; LA32-NEXT:    move $a3, $a5
+; LA32-NEXT:    bne $a0, $zero, .LBB3_1
+; LA32-NEXT:  # %bb.6: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB3_2 Depth=1
+; LA32-NEXT:    move $a2, $s1
+; LA32-NEXT:    move $a3, $s0
+; LA32-NEXT:    b .LBB3_1
+; LA32-NEXT:  .LBB3_7: # %atomicrmw.end
+; LA32-NEXT:    move $a0, $a4
+; LA32-NEXT:    move $a1, $a5
+; LA32-NEXT:    ld.w $s2, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s1, $sp, 16 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umax_i64_acquire:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammax_db.du $a2, $a1, $a0
@@ -82,6 +209,31 @@ define i64 @atomicrmw_umax_i64_acquire(ptr %a, i64 %b) nounwind {
 }
 
 define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_umin_i8_acquire:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    ori $a3, $zero, 255
+; LA32-NEXT:    sll.w $a3, $a3, $a0
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:  .LBB4_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a2, 0
+; LA32-NEXT:    and $a6, $a4, $a3
+; LA32-NEXT:    move $a5, $a4
+; LA32-NEXT:    bgeu $a1, $a6, .LBB4_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB4_1 Depth=1
+; LA32-NEXT:    xor $a5, $a4, $a1
+; LA32-NEXT:    and $a5, $a5, $a3
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:  .LBB4_3: # in Loop: Header=BB4_1 Depth=1
+; LA32-NEXT:    sc.w $a5, $a2, 0
+; LA32-NEXT:    beq $a5, $zero, .LBB4_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a4, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umin_i8_acquire:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -110,6 +262,32 @@ define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
 }
 
 define i16 @atomicrmw_umin_i16_acquire(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_umin_i16_acquire:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    lu12i.w $a3, 15
+; LA32-NEXT:    ori $a3, $a3, 4095
+; LA32-NEXT:    sll.w $a4, $a3, $a0
+; LA32-NEXT:    and $a1, $a1, $a3
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:  .LBB5_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a2, 0
+; LA32-NEXT:    and $a6, $a3, $a4
+; LA32-NEXT:    move $a5, $a3
+; LA32-NEXT:    bgeu $a1, $a6, .LBB5_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB5_1 Depth=1
+; LA32-NEXT:    xor $a5, $a3, $a1
+; LA32-NEXT:    and $a5, $a5, $a4
+; LA32-NEXT:    xor $a5, $a3, $a5
+; LA32-NEXT:  .LBB5_3: # in Loop: Header=BB5_1 Depth=1
+; LA32-NEXT:    sc.w $a5, $a2, 0
+; LA32-NEXT:    beq $a5, $zero, .LBB5_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a3, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umin_i16_acquire:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -139,6 +317,21 @@ define i16 @atomicrmw_umin_i16_acquire(ptr %a, i16 %b) nounwind {
 }
 
 define i32 @atomicrmw_umin_i32_acquire(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_umin_i32_acquire:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB6_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    move $a3, $a2
+; LA32-NEXT:    bgeu $a1, $a2, .LBB6_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB6_1 Depth=1
+; LA32-NEXT:    move $a3, $a1
+; LA32-NEXT:  .LBB6_3: # in Loop: Header=BB6_1 Depth=1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beq $a3, $zero, .LBB6_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umin_i32_acquire:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammin_db.wu $a2, $a1, $a0
@@ -149,6 +342,66 @@ define i32 @atomicrmw_umin_i32_acquire(ptr %a, i32 %b) nounwind {
 }
 
 define i64 @atomicrmw_umin_i64_acquire(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_umin_i64_acquire:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 24 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s0, $sp, 20 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s1, $sp, 16 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s2, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    move $fp, $a0
+; LA32-NEXT:    ld.w $a5, $a0, 4
+; LA32-NEXT:    ld.w $a4, $a0, 0
+; LA32-NEXT:    move $s0, $a2
+; LA32-NEXT:    move $s1, $a1
+; LA32-NEXT:    addi.w $s2, $sp, 0
+; LA32-NEXT:    b .LBB7_2
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB7_1: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB7_2 Depth=1
+; LA32-NEXT:    st.w $a4, $sp, 0
+; LA32-NEXT:    st.w $a5, $sp, 4
+; LA32-NEXT:    ori $a4, $zero, 2
+; LA32-NEXT:    ori $a5, $zero, 2
+; LA32-NEXT:    move $a0, $fp
+; LA32-NEXT:    move $a1, $s2
+; LA32-NEXT:    bl __atomic_compare_exchange_8
+; LA32-NEXT:    ld.w $a5, $sp, 4
+; LA32-NEXT:    ld.w $a4, $sp, 0
+; LA32-NEXT:    bne $a0, $zero, .LBB7_7
+; LA32-NEXT:  .LBB7_2: # %atomicrmw.start
+; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    beq $a5, $s0, .LBB7_4
+; LA32-NEXT:  # %bb.3: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB7_2 Depth=1
+; LA32-NEXT:    sltu $a0, $s0, $a5
+; LA32-NEXT:    b .LBB7_5
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB7_4: # in Loop: Header=BB7_2 Depth=1
+; LA32-NEXT:    sltu $a0, $s1, $a4
+; LA32-NEXT:  .LBB7_5: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB7_2 Depth=1
+; LA32-NEXT:    xori $a0, $a0, 1
+; LA32-NEXT:    move $a2, $a4
+; LA32-NEXT:    move $a3, $a5
+; LA32-NEXT:    bne $a0, $zero, .LBB7_1
+; LA32-NEXT:  # %bb.6: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB7_2 Depth=1
+; LA32-NEXT:    move $a2, $s1
+; LA32-NEXT:    move $a3, $s0
+; LA32-NEXT:    b .LBB7_1
+; LA32-NEXT:  .LBB7_7: # %atomicrmw.end
+; LA32-NEXT:    move $a0, $a4
+; LA32-NEXT:    move $a1, $a5
+; LA32-NEXT:    ld.w $s2, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s1, $sp, 16 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umin_i64_acquire:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammin_db.du $a2, $a1, $a0
@@ -159,6 +412,36 @@ define i64 @atomicrmw_umin_i64_acquire(ptr %a, i64 %b) nounwind {
 }
 
 define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_max_i8_acquire:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    andi $a3, $a0, 24
+; LA32-NEXT:    ori $a4, $zero, 255
+; LA32-NEXT:    sll.w $a4, $a4, $a0
+; LA32-NEXT:    slli.w $a1, $a1, 24
+; LA32-NEXT:    srai.w $a1, $a1, 24
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:    xori $a3, $a3, 24
+; LA32-NEXT:  .LBB8_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a5, $a2, 0
+; LA32-NEXT:    and $a7, $a5, $a4
+; LA32-NEXT:    move $a6, $a5
+; LA32-NEXT:    sll.w $a7, $a7, $a3
+; LA32-NEXT:    sra.w $a7, $a7, $a3
+; LA32-NEXT:    bge $a7, $a1, .LBB8_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB8_1 Depth=1
+; LA32-NEXT:    xor $a6, $a5, $a1
+; LA32-NEXT:    and $a6, $a6, $a4
+; LA32-NEXT:    xor $a6, $a5, $a6
+; LA32-NEXT:  .LBB8_3: # in Loop: Header=BB8_1 Depth=1
+; LA32-NEXT:    sc.w $a6, $a2, 0
+; LA32-NEXT:    beq $a6, $zero, .LBB8_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a5, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_max_i8_acquire:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -191,6 +474,38 @@ define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind {
 }
 
 define i16 @atomicrmw_max_i16_acquire(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_max_i16_acquire:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    andi $a3, $a0, 24
+; LA32-NEXT:    lu12i.w $a4, 15
+; LA32-NEXT:    ori $a4, $a4, 4095
+; LA32-NEXT:    sll.w $a4, $a4, $a0
+; LA32-NEXT:    slli.w $a1, $a1, 16
+; LA32-NEXT:    srai.w $a1, $a1, 16
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:    ori $a5, $zero, 16
+; LA32-NEXT:    sub.w $a3, $a5, $a3
+; LA32-NEXT:  .LBB9_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a5, $a2, 0
+; LA32-NEXT:    and $a7, $a5, $a4
+; LA32-NEXT:    move $a6, $a5
+; LA32-NEXT:    sll.w $a7, $a7, $a3
+; LA32-NEXT:    sra.w $a7, $a7, $a3
+; LA32-NEXT:    bge $a7, $a1, .LBB9_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB9_1 Depth=1
+; LA32-NEXT:    xor $a6, $a5, $a1
+; LA32-NEXT:    and $a6, $a6, $a4
+; LA32-NEXT:    xor $a6, $a5, $a6
+; LA32-NEXT:  .LBB9_3: # in Loop: Header=BB9_1 Depth=1
+; LA32-NEXT:    sc.w $a6, $a2, 0
+; LA32-NEXT:    beq $a6, $zero, .LBB9_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a5, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_max_i16_acquire:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -225,6 +540,21 @@ define i16 @atomicrmw_max_i16_acquire(ptr %a, i16 %b) nounwind {
 }
 
 define i32 @atomicrmw_max_i32_acquire(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_max_i32_acquire:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB10_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    move $a3, $a2
+; LA32-NEXT:    bge $a2, $a1, .LBB10_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB10_1 Depth=1
+; LA32-NEXT:    move $a3, $a1
+; LA32-NEXT:  .LBB10_3: # in Loop: Header=BB10_1 Depth=1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beq $a3, $zero, .LBB10_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_max_i32_acquire:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammax_db.w $a2, $a1, $a0
@@ -235,6 +565,65 @@ define i32 @atomicrmw_max_i32_acquire(ptr %a, i32 %b) nounwind {
 }
 
 define i64 @atomicrmw_max_i64_acquire(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_max_i64_acquire:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 24 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s0, $sp, 20 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s1, $sp, 16 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s2, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    move $fp, $a0
+; LA32-NEXT:    ld.w $a5, $a0, 4
+; LA32-NEXT:    ld.w $a4, $a0, 0
+; LA32-NEXT:    move $s0, $a2
+; LA32-NEXT:    move $s1, $a1
+; LA32-NEXT:    addi.w $s2, $sp, 0
+; LA32-NEXT:    b .LBB11_2
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB11_1: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB11_2 Depth=1
+; LA32-NEXT:    st.w $a4, $sp, 0
+; LA32-NEXT:    st.w $a5, $sp, 4
+; LA32-NEXT:    ori $a4, $zero, 2
+; LA32-NEXT:    ori $a5, $zero, 2
+; LA32-NEXT:    move $a0, $fp
+; LA32-NEXT:    move $a1, $s2
+; LA32-NEXT:    bl __atomic_compare_exchange_8
+; LA32-NEXT:    ld.w $a5, $sp, 4
+; LA32-NEXT:    ld.w $a4, $sp, 0
+; LA32-NEXT:    bne $a0, $zero, .LBB11_7
+; LA32-NEXT:  .LBB11_2: # %atomicrmw.start
+; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    beq $a5, $s0, .LBB11_4
+; LA32-NEXT:  # %bb.3: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB11_2 Depth=1
+; LA32-NEXT:    slt $a0, $s0, $a5
+; LA32-NEXT:    b .LBB11_5
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB11_4: # in Loop: Header=BB11_2 Depth=1
+; LA32-NEXT:    sltu $a0, $s1, $a4
+; LA32-NEXT:  .LBB11_5: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB11_2 Depth=1
+; LA32-NEXT:    move $a2, $a4
+; LA32-NEXT:    move $a3, $a5
+; LA32-NEXT:    bne $a0, $zero, .LBB11_1
+; LA32-NEXT:  # %bb.6: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB11_2 Depth=1
+; LA32-NEXT:    move $a2, $s1
+; LA32-NEXT:    move $a3, $s0
+; LA32-NEXT:    b .LBB11_1
+; LA32-NEXT:  .LBB11_7: # %atomicrmw.end
+; LA32-NEXT:    move $a0, $a4
+; LA32-NEXT:    move $a1, $a5
+; LA32-NEXT:    ld.w $s2, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s1, $sp, 16 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_max_i64_acquire:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammax_db.d $a2, $a1, $a0
@@ -245,6 +634,36 @@ define i64 @atomicrmw_max_i64_acquire(ptr %a, i64 %b) nounwind {
 }
 
 define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_min_i8_acquire:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    andi $a3, $a0, 24
+; LA32-NEXT:    ori $a4, $zero, 255
+; LA32-NEXT:    sll.w $a4, $a4, $a0
+; LA32-NEXT:    slli.w $a1, $a1, 24
+; LA32-NEXT:    srai.w $a1, $a1, 24
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:    xori $a3, $a3, 24
+; LA32-NEXT:  .LBB12_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a5, $a2, 0
+; LA32-NEXT:    and $a7, $a5, $a4
+; LA32-NEXT:    move $a6, $a5
+; LA32-NEXT:    sll.w $a7, $a7, $a3
+; LA32-NEXT:    sra.w $a7, $a7, $a3
+; LA32-NEXT:    bge $a1, $a7, .LBB12_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB12_1 Depth=1
+; LA32-NEXT:    xor $a6, $a5, $a1
+; LA32-NEXT:    and $a6, $a6, $a4
+; LA32-NEXT:    xor $a6, $a5, $a6
+; LA32-NEXT:  .LBB12_3: # in Loop: Header=BB12_1 Depth=1
+; LA32-NEXT:    sc.w $a6, $a2, 0
+; LA32-NEXT:    beq $a6, $zero, .LBB12_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a5, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_min_i8_acquire:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -277,6 +696,38 @@ define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind {
 }
 
 define i16 @atomicrmw_min_i16_acquire(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_min_i16_acquire:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    andi $a3, $a0, 24
+; LA32-NEXT:    lu12i.w $a4, 15
+; LA32-NEXT:    ori $a4, $a4, 4095
+; LA32-NEXT:    sll.w $a4, $a4, $a0
+; LA32-NEXT:    slli.w $a1, $a1, 16
+; LA32-NEXT:    srai.w $a1, $a1, 16
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:    ori $a5, $zero, 16
+; LA32-NEXT:    sub.w $a3, $a5, $a3
+; LA32-NEXT:  .LBB13_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a5, $a2, 0
+; LA32-NEXT:    and $a7, $a5, $a4
+; LA32-NEXT:    move $a6, $a5
+; LA32-NEXT:    sll.w $a7, $a7, $a3
+; LA32-NEXT:    sra.w $a7, $a7, $a3
+; LA32-NEXT:    bge $a1, $a7, .LBB13_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB13_1 Depth=1
+; LA32-NEXT:    xor $a6, $a5, $a1
+; LA32-NEXT:    and $a6, $a6, $a4
+; LA32-NEXT:    xor $a6, $a5, $a6
+; LA32-NEXT:  .LBB13_3: # in Loop: Header=BB13_1 Depth=1
+; LA32-NEXT:    sc.w $a6, $a2, 0
+; LA32-NEXT:    beq $a6, $zero, .LBB13_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a5, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_min_i16_acquire:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -311,6 +762,21 @@ define i16 @atomicrmw_min_i16_acquire(ptr %a, i16 %b) nounwind {
 }
 
 define i32 @atomicrmw_min_i32_acquire(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_min_i32_acquire:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB14_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    move $a3, $a2
+; LA32-NEXT:    bge $a1, $a2, .LBB14_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB14_1 Depth=1
+; LA32-NEXT:    move $a3, $a1
+; LA32-NEXT:  .LBB14_3: # in Loop: Header=BB14_1 Depth=1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beq $a3, $zero, .LBB14_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_min_i32_acquire:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammin_db.w $a2, $a1, $a0
@@ -321,6 +787,66 @@ define i32 @atomicrmw_min_i32_acquire(ptr %a, i32 %b) nounwind {
 }
 
 define i64 @atomicrmw_min_i64_acquire(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_min_i64_acquire:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 24 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s0, $sp, 20 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s1, $sp, 16 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s2, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    move $fp, $a0
+; LA32-NEXT:    ld.w $a5, $a0, 4
+; LA32-NEXT:    ld.w $a4, $a0, 0
+; LA32-NEXT:    move $s0, $a2
+; LA32-NEXT:    move $s1, $a1
+; LA32-NEXT:    addi.w $s2, $sp, 0
+; LA32-NEXT:    b .LBB15_2
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB15_1: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB15_2 Depth=1
+; LA32-NEXT:    st.w $a4, $sp, 0
+; LA32-NEXT:    st.w $a5, $sp, 4
+; LA32-NEXT:    ori $a4, $zero, 2
+; LA32-NEXT:    ori $a5, $zero, 2
+; LA32-NEXT:    move $a0, $fp
+; LA32-NEXT:    move $a1, $s2
+; LA32-NEXT:    bl __atomic_compare_exchange_8
+; LA32-NEXT:    ld.w $a5, $sp, 4
+; LA32-NEXT:    ld.w $a4, $sp, 0
+; LA32-NEXT:    bne $a0, $zero, .LBB15_7
+; LA32-NEXT:  .LBB15_2: # %atomicrmw.start
+; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    beq $a5, $s0, .LBB15_4
+; LA32-NEXT:  # %bb.3: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB15_2 Depth=1
+; LA32-NEXT:    slt $a0, $s0, $a5
+; LA32-NEXT:    b .LBB15_5
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB15_4: # in Loop: Header=BB15_2 Depth=1
+; LA32-NEXT:    sltu $a0, $s1, $a4
+; LA32-NEXT:  .LBB15_5: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB15_2 Depth=1
+; LA32-NEXT:    xori $a0, $a0, 1
+; LA32-NEXT:    move $a2, $a4
+; LA32-NEXT:    move $a3, $a5
+; LA32-NEXT:    bne $a0, $zero, .LBB15_1
+; LA32-NEXT:  # %bb.6: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB15_2 Depth=1
+; LA32-NEXT:    move $a2, $s1
+; LA32-NEXT:    move $a3, $s0
+; LA32-NEXT:    b .LBB15_1
+; LA32-NEXT:  .LBB15_7: # %atomicrmw.end
+; LA32-NEXT:    move $a0, $a4
+; LA32-NEXT:    move $a1, $a5
+; LA32-NEXT:    ld.w $s2, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s1, $sp, 16 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_min_i64_acquire:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammin_db.d $a2, $a1, $a0
@@ -331,6 +857,31 @@ define i64 @atomicrmw_min_i64_acquire(ptr %a, i64 %b) nounwind {
 }
 
 define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_umax_i8_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    ori $a3, $zero, 255
+; LA32-NEXT:    sll.w $a3, $a3, $a0
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:  .LBB16_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a2, 0
+; LA32-NEXT:    and $a6, $a4, $a3
+; LA32-NEXT:    move $a5, $a4
+; LA32-NEXT:    bgeu $a6, $a1, .LBB16_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB16_1 Depth=1
+; LA32-NEXT:    xor $a5, $a4, $a1
+; LA32-NEXT:    and $a5, $a5, $a3
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:  .LBB16_3: # in Loop: Header=BB16_1 Depth=1
+; LA32-NEXT:    sc.w $a5, $a2, 0
+; LA32-NEXT:    beq $a5, $zero, .LBB16_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a4, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umax_i8_release:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -359,6 +910,32 @@ define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
 }
 
 define i16 @atomicrmw_umax_i16_release(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_umax_i16_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    lu12i.w $a3, 15
+; LA32-NEXT:    ori $a3, $a3, 4095
+; LA32-NEXT:    sll.w $a4, $a3, $a0
+; LA32-NEXT:    and $a1, $a1, $a3
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:  .LBB17_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a2, 0
+; LA32-NEXT:    and $a6, $a3, $a4
+; LA32-NEXT:    move $a5, $a3
+; LA32-NEXT:    bgeu $a6, $a1, .LBB17_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB17_1 Depth=1
+; LA32-NEXT:    xor $a5, $a3, $a1
+; LA32-NEXT:    and $a5, $a5, $a4
+; LA32-NEXT:    xor $a5, $a3, $a5
+; LA32-NEXT:  .LBB17_3: # in Loop: Header=BB17_1 Depth=1
+; LA32-NEXT:    sc.w $a5, $a2, 0
+; LA32-NEXT:    beq $a5, $zero, .LBB17_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a3, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umax_i16_release:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -388,6 +965,21 @@ define i16 @atomicrmw_umax_i16_release(ptr %a, i16 %b) nounwind {
 }
 
 define i32 @atomicrmw_umax_i32_release(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_umax_i32_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB18_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    move $a3, $a2
+; LA32-NEXT:    bgeu $a2, $a1, .LBB18_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB18_1 Depth=1
+; LA32-NEXT:    move $a3, $a1
+; LA32-NEXT:  .LBB18_3: # in Loop: Header=BB18_1 Depth=1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beq $a3, $zero, .LBB18_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umax_i32_release:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammax_db.wu $a2, $a1, $a0
@@ -398,6 +990,65 @@ define i32 @atomicrmw_umax_i32_release(ptr %a, i32 %b) nounwind {
 }
 
 define i64 @atomicrmw_umax_i64_release(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_umax_i64_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 24 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s0, $sp, 20 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s1, $sp, 16 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s2, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    move $fp, $a0
+; LA32-NEXT:    ld.w $a5, $a0, 4
+; LA32-NEXT:    ld.w $a4, $a0, 0
+; LA32-NEXT:    move $s0, $a2
+; LA32-NEXT:    move $s1, $a1
+; LA32-NEXT:    addi.w $s2, $sp, 0
+; LA32-NEXT:    b .LBB19_2
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB19_1: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB19_2 Depth=1
+; LA32-NEXT:    st.w $a4, $sp, 0
+; LA32-NEXT:    st.w $a5, $sp, 4
+; LA32-NEXT:    ori $a4, $zero, 3
+; LA32-NEXT:    move $a0, $fp
+; LA32-NEXT:    move $a1, $s2
+; LA32-NEXT:    move $a5, $zero
+; LA32-NEXT:    bl __atomic_compare_exchange_8
+; LA32-NEXT:    ld.w $a5, $sp, 4
+; LA32-NEXT:    ld.w $a4, $sp, 0
+; LA32-NEXT:    bne $a0, $zero, .LBB19_7
+; LA32-NEXT:  .LBB19_2: # %atomicrmw.start
+; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    beq $a5, $s0, .LBB19_4
+; LA32-NEXT:  # %bb.3: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB19_2 Depth=1
+; LA32-NEXT:    sltu $a0, $s0, $a5
+; LA32-NEXT:    b .LBB19_5
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB19_4: # in Loop: Header=BB19_2 Depth=1
+; LA32-NEXT:    sltu $a0, $s1, $a4
+; LA32-NEXT:  .LBB19_5: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB19_2 Depth=1
+; LA32-NEXT:    move $a2, $a4
+; LA32-NEXT:    move $a3, $a5
+; LA32-NEXT:    bne $a0, $zero, .LBB19_1
+; LA32-NEXT:  # %bb.6: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB19_2 Depth=1
+; LA32-NEXT:    move $a2, $s1
+; LA32-NEXT:    move $a3, $s0
+; LA32-NEXT:    b .LBB19_1
+; LA32-NEXT:  .LBB19_7: # %atomicrmw.end
+; LA32-NEXT:    move $a0, $a4
+; LA32-NEXT:    move $a1, $a5
+; LA32-NEXT:    ld.w $s2, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s1, $sp, 16 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umax_i64_release:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammax_db.du $a2, $a1, $a0
@@ -408,6 +1059,31 @@ define i64 @atomicrmw_umax_i64_release(ptr %a, i64 %b) nounwind {
 }
 
 define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_umin_i8_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    ori $a3, $zero, 255
+; LA32-NEXT:    sll.w $a3, $a3, $a0
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:  .LBB20_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a2, 0
+; LA32-NEXT:    and $a6, $a4, $a3
+; LA32-NEXT:    move $a5, $a4
+; LA32-NEXT:    bgeu $a1, $a6, .LBB20_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB20_1 Depth=1
+; LA32-NEXT:    xor $a5, $a4, $a1
+; LA32-NEXT:    and $a5, $a5, $a3
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:  .LBB20_3: # in Loop: Header=BB20_1 Depth=1
+; LA32-NEXT:    sc.w $a5, $a2, 0
+; LA32-NEXT:    beq $a5, $zero, .LBB20_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a4, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umin_i8_release:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -436,6 +1112,32 @@ define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
 }
 
 define i16 @atomicrmw_umin_i16_release(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_umin_i16_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    lu12i.w $a3, 15
+; LA32-NEXT:    ori $a3, $a3, 4095
+; LA32-NEXT:    sll.w $a4, $a3, $a0
+; LA32-NEXT:    and $a1, $a1, $a3
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:  .LBB21_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a2, 0
+; LA32-NEXT:    and $a6, $a3, $a4
+; LA32-NEXT:    move $a5, $a3
+; LA32-NEXT:    bgeu $a1, $a6, .LBB21_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB21_1 Depth=1
+; LA32-NEXT:    xor $a5, $a3, $a1
+; LA32-NEXT:    and $a5, $a5, $a4
+; LA32-NEXT:    xor $a5, $a3, $a5
+; LA32-NEXT:  .LBB21_3: # in Loop: Header=BB21_1 Depth=1
+; LA32-NEXT:    sc.w $a5, $a2, 0
+; LA32-NEXT:    beq $a5, $zero, .LBB21_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a3, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umin_i16_release:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -465,6 +1167,21 @@ define i16 @atomicrmw_umin_i16_release(ptr %a, i16 %b) nounwind {
 }
 
 define i32 @atomicrmw_umin_i32_release(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_umin_i32_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB22_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    move $a3, $a2
+; LA32-NEXT:    bgeu $a1, $a2, .LBB22_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB22_1 Depth=1
+; LA32-NEXT:    move $a3, $a1
+; LA32-NEXT:  .LBB22_3: # in Loop: Header=BB22_1 Depth=1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beq $a3, $zero, .LBB22_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umin_i32_release:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammin_db.wu $a2, $a1, $a0
@@ -475,6 +1192,66 @@ define i32 @atomicrmw_umin_i32_release(ptr %a, i32 %b) nounwind {
 }
 
 define i64 @atomicrmw_umin_i64_release(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_umin_i64_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 24 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s0, $sp, 20 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s1, $sp, 16 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s2, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    move $fp, $a0
+; LA32-NEXT:    ld.w $a5, $a0, 4
+; LA32-NEXT:    ld.w $a4, $a0, 0
+; LA32-NEXT:    move $s0, $a2
+; LA32-NEXT:    move $s1, $a1
+; LA32-NEXT:    addi.w $s2, $sp, 0
+; LA32-NEXT:    b .LBB23_2
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB23_1: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB23_2 Depth=1
+; LA32-NEXT:    st.w $a4, $sp, 0
+; LA32-NEXT:    st.w $a5, $sp, 4
+; LA32-NEXT:    ori $a4, $zero, 3
+; LA32-NEXT:    move $a0, $fp
+; LA32-NEXT:    move $a1, $s2
+; LA32-NEXT:    move $a5, $zero
+; LA32-NEXT:    bl __atomic_compare_exchange_8
+; LA32-NEXT:    ld.w $a5, $sp, 4
+; LA32-NEXT:    ld.w $a4, $sp, 0
+; LA32-NEXT:    bne $a0, $zero, .LBB23_7
+; LA32-NEXT:  .LBB23_2: # %atomicrmw.start
+; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    beq $a5, $s0, .LBB23_4
+; LA32-NEXT:  # %bb.3: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB23_2 Depth=1
+; LA32-NEXT:    sltu $a0, $s0, $a5
+; LA32-NEXT:    b .LBB23_5
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB23_4: # in Loop: Header=BB23_2 Depth=1
+; LA32-NEXT:    sltu $a0, $s1, $a4
+; LA32-NEXT:  .LBB23_5: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB23_2 Depth=1
+; LA32-NEXT:    xori $a0, $a0, 1
+; LA32-NEXT:    move $a2, $a4
+; LA32-NEXT:    move $a3, $a5
+; LA32-NEXT:    bne $a0, $zero, .LBB23_1
+; LA32-NEXT:  # %bb.6: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB23_2 Depth=1
+; LA32-NEXT:    move $a2, $s1
+; LA32-NEXT:    move $a3, $s0
+; LA32-NEXT:    b .LBB23_1
+; LA32-NEXT:  .LBB23_7: # %atomicrmw.end
+; LA32-NEXT:    move $a0, $a4
+; LA32-NEXT:    move $a1, $a5
+; LA32-NEXT:    ld.w $s2, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s1, $sp, 16 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umin_i64_release:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammin_db.du $a2, $a1, $a0
@@ -485,6 +1262,36 @@ define i64 @atomicrmw_umin_i64_release(ptr %a, i64 %b) nounwind {
 }
 
 define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_max_i8_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    andi $a3, $a0, 24
+; LA32-NEXT:    ori $a4, $zero, 255
+; LA32-NEXT:    sll.w $a4, $a4, $a0
+; LA32-NEXT:    slli.w $a1, $a1, 24
+; LA32-NEXT:    srai.w $a1, $a1, 24
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:    xori $a3, $a3, 24
+; LA32-NEXT:  .LBB24_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a5, $a2, 0
+; LA32-NEXT:    and $a7, $a5, $a4
+; LA32-NEXT:    move $a6, $a5
+; LA32-NEXT:    sll.w $a7, $a7, $a3
+; LA32-NEXT:    sra.w $a7, $a7, $a3
+; LA32-NEXT:    bge $a7, $a1, .LBB24_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB24_1 Depth=1
+; LA32-NEXT:    xor $a6, $a5, $a1
+; LA32-NEXT:    and $a6, $a6, $a4
+; LA32-NEXT:    xor $a6, $a5, $a6
+; LA32-NEXT:  .LBB24_3: # in Loop: Header=BB24_1 Depth=1
+; LA32-NEXT:    sc.w $a6, $a2, 0
+; LA32-NEXT:    beq $a6, $zero, .LBB24_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a5, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_max_i8_release:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -517,6 +1324,38 @@ define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind {
 }
 
 define i16 @atomicrmw_max_i16_release(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_max_i16_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    andi $a3, $a0, 24
+; LA32-NEXT:    lu12i.w $a4, 15
+; LA32-NEXT:    ori $a4, $a4, 4095
+; LA32-NEXT:    sll.w $a4, $a4, $a0
+; LA32-NEXT:    slli.w $a1, $a1, 16
+; LA32-NEXT:    srai.w $a1, $a1, 16
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:    ori $a5, $zero, 16
+; LA32-NEXT:    sub.w $a3, $a5, $a3
+; LA32-NEXT:  .LBB25_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a5, $a2, 0
+; LA32-NEXT:    and $a7, $a5, $a4
+; LA32-NEXT:    move $a6, $a5
+; LA32-NEXT:    sll.w $a7, $a7, $a3
+; LA32-NEXT:    sra.w $a7, $a7, $a3
+; LA32-NEXT:    bge $a7, $a1, .LBB25_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB25_1 Depth=1
+; LA32-NEXT:    xor $a6, $a5, $a1
+; LA32-NEXT:    and $a6, $a6, $a4
+; LA32-NEXT:    xor $a6, $a5, $a6
+; LA32-NEXT:  .LBB25_3: # in Loop: Header=BB25_1 Depth=1
+; LA32-NEXT:    sc.w $a6, $a2, 0
+; LA32-NEXT:    beq $a6, $zero, .LBB25_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a5, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_max_i16_release:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -551,6 +1390,21 @@ define i16 @atomicrmw_max_i16_release(ptr %a, i16 %b) nounwind {
 }
 
 define i32 @atomicrmw_max_i32_release(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_max_i32_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB26_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    move $a3, $a2
+; LA32-NEXT:    bge $a2, $a1, .LBB26_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB26_1 Depth=1
+; LA32-NEXT:    move $a3, $a1
+; LA32-NEXT:  .LBB26_3: # in Loop: Header=BB26_1 Depth=1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beq $a3, $zero, .LBB26_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_max_i32_release:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammax_db.w $a2, $a1, $a0
@@ -561,6 +1415,65 @@ define i32 @atomicrmw_max_i32_release(ptr %a, i32 %b) nounwind {
 }
 
 define i64 @atomicrmw_max_i64_release(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_max_i64_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 24 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s0, $sp, 20 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s1, $sp, 16 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s2, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    move $fp, $a0
+; LA32-NEXT:    ld.w $a5, $a0, 4
+; LA32-NEXT:    ld.w $a4, $a0, 0
+; LA32-NEXT:    move $s0, $a2
+; LA32-NEXT:    move $s1, $a1
+; LA32-NEXT:    addi.w $s2, $sp, 0
+; LA32-NEXT:    b .LBB27_2
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB27_1: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB27_2 Depth=1
+; LA32-NEXT:    st.w $a4, $sp, 0
+; LA32-NEXT:    st.w $a5, $sp, 4
+; LA32-NEXT:    ori $a4, $zero, 3
+; LA32-NEXT:    move $a0, $fp
+; LA32-NEXT:    move $a1, $s2
+; LA32-NEXT:    move $a5, $zero
+; LA32-NEXT:    bl __atomic_compare_exchange_8
+; LA32-NEXT:    ld.w $a5, $sp, 4
+; LA32-NEXT:    ld.w $a4, $sp, 0
+; LA32-NEXT:    bne $a0, $zero, .LBB27_7
+; LA32-NEXT:  .LBB27_2: # %atomicrmw.start
+; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    beq $a5, $s0, .LBB27_4
+; LA32-NEXT:  # %bb.3: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB27_2 Depth=1
+; LA32-NEXT:    slt $a0, $s0, $a5
+; LA32-NEXT:    b .LBB27_5
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB27_4: # in Loop: Header=BB27_2 Depth=1
+; LA32-NEXT:    sltu $a0, $s1, $a4
+; LA32-NEXT:  .LBB27_5: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB27_2 Depth=1
+; LA32-NEXT:    move $a2, $a4
+; LA32-NEXT:    move $a3, $a5
+; LA32-NEXT:    bne $a0, $zero, .LBB27_1
+; LA32-NEXT:  # %bb.6: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB27_2 Depth=1
+; LA32-NEXT:    move $a2, $s1
+; LA32-NEXT:    move $a3, $s0
+; LA32-NEXT:    b .LBB27_1
+; LA32-NEXT:  .LBB27_7: # %atomicrmw.end
+; LA32-NEXT:    move $a0, $a4
+; LA32-NEXT:    move $a1, $a5
+; LA32-NEXT:    ld.w $s2, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s1, $sp, 16 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_max_i64_release:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammax_db.d $a2, $a1, $a0
@@ -571,6 +1484,36 @@ define i64 @atomicrmw_max_i64_release(ptr %a, i64 %b) nounwind {
 }
 
 define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_min_i8_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    andi $a3, $a0, 24
+; LA32-NEXT:    ori $a4, $zero, 255
+; LA32-NEXT:    sll.w $a4, $a4, $a0
+; LA32-NEXT:    slli.w $a1, $a1, 24
+; LA32-NEXT:    srai.w $a1, $a1, 24
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:    xori $a3, $a3, 24
+; LA32-NEXT:  .LBB28_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a5, $a2, 0
+; LA32-NEXT:    and $a7, $a5, $a4
+; LA32-NEXT:    move $a6, $a5
+; LA32-NEXT:    sll.w $a7, $a7, $a3
+; LA32-NEXT:    sra.w $a7, $a7, $a3
+; LA32-NEXT:    bge $a1, $a7, .LBB28_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB28_1 Depth=1
+; LA32-NEXT:    xor $a6, $a5, $a1
+; LA32-NEXT:    and $a6, $a6, $a4
+; LA32-NEXT:    xor $a6, $a5, $a6
+; LA32-NEXT:  .LBB28_3: # in Loop: Header=BB28_1 Depth=1
+; LA32-NEXT:    sc.w $a6, $a2, 0
+; LA32-NEXT:    beq $a6, $zero, .LBB28_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a5, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_min_i8_release:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -603,6 +1546,38 @@ define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind {
 }
 
 define i16 @atomicrmw_min_i16_release(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_min_i16_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    andi $a3, $a0, 24
+; LA32-NEXT:    lu12i.w $a4, 15
+; LA32-NEXT:    ori $a4, $a4, 4095
+; LA32-NEXT:    sll.w $a4, $a4, $a0
+; LA32-NEXT:    slli.w $a1, $a1, 16
+; LA32-NEXT:    srai.w $a1, $a1, 16
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:    ori $a5, $zero, 16
+; LA32-NEXT:    sub.w $a3, $a5, $a3
+; LA32-NEXT:  .LBB29_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a5, $a2, 0
+; LA32-NEXT:    and $a7, $a5, $a4
+; LA32-NEXT:    move $a6, $a5
+; LA32-NEXT:    sll.w $a7, $a7, $a3
+; LA32-NEXT:    sra.w $a7, $a7, $a3
+; LA32-NEXT:    bge $a1, $a7, .LBB29_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB29_1 Depth=1
+; LA32-NEXT:    xor $a6, $a5, $a1
+; LA32-NEXT:    and $a6, $a6, $a4
+; LA32-NEXT:    xor $a6, $a5, $a6
+; LA32-NEXT:  .LBB29_3: # in Loop: Header=BB29_1 Depth=1
+; LA32-NEXT:    sc.w $a6, $a2, 0
+; LA32-NEXT:    beq $a6, $zero, .LBB29_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a5, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_min_i16_release:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -637,6 +1612,21 @@ define i16 @atomicrmw_min_i16_release(ptr %a, i16 %b) nounwind {
 }
 
 define i32 @atomicrmw_min_i32_release(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_min_i32_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB30_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    move $a3, $a2
+; LA32-NEXT:    bge $a1, $a2, .LBB30_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB30_1 Depth=1
+; LA32-NEXT:    move $a3, $a1
+; LA32-NEXT:  .LBB30_3: # in Loop: Header=BB30_1 Depth=1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beq $a3, $zero, .LBB30_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_min_i32_release:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammin_db.w $a2, $a1, $a0
@@ -647,6 +1637,66 @@ define i32 @atomicrmw_min_i32_release(ptr %a, i32 %b) nounwind {
 }
 
 define i64 @atomicrmw_min_i64_release(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_min_i64_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 24 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s0, $sp, 20 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s1, $sp, 16 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s2, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    move $fp, $a0
+; LA32-NEXT:    ld.w $a5, $a0, 4
+; LA32-NEXT:    ld.w $a4, $a0, 0
+; LA32-NEXT:    move $s0, $a2
+; LA32-NEXT:    move $s1, $a1
+; LA32-NEXT:    addi.w $s2, $sp, 0
+; LA32-NEXT:    b .LBB31_2
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB31_1: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB31_2 Depth=1
+; LA32-NEXT:    st.w $a4, $sp, 0
+; LA32-NEXT:    st.w $a5, $sp, 4
+; LA32-NEXT:    ori $a4, $zero, 3
+; LA32-NEXT:    move $a0, $fp
+; LA32-NEXT:    move $a1, $s2
+; LA32-NEXT:    move $a5, $zero
+; LA32-NEXT:    bl __atomic_compare_exchange_8
+; LA32-NEXT:    ld.w $a5, $sp, 4
+; LA32-NEXT:    ld.w $a4, $sp, 0
+; LA32-NEXT:    bne $a0, $zero, .LBB31_7
+; LA32-NEXT:  .LBB31_2: # %atomicrmw.start
+; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    beq $a5, $s0, .LBB31_4
+; LA32-NEXT:  # %bb.3: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB31_2 Depth=1
+; LA32-NEXT:    slt $a0, $s0, $a5
+; LA32-NEXT:    b .LBB31_5
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB31_4: # in Loop: Header=BB31_2 Depth=1
+; LA32-NEXT:    sltu $a0, $s1, $a4
+; LA32-NEXT:  .LBB31_5: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB31_2 Depth=1
+; LA32-NEXT:    xori $a0, $a0, 1
+; LA32-NEXT:    move $a2, $a4
+; LA32-NEXT:    move $a3, $a5
+; LA32-NEXT:    bne $a0, $zero, .LBB31_1
+; LA32-NEXT:  # %bb.6: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB31_2 Depth=1
+; LA32-NEXT:    move $a2, $s1
+; LA32-NEXT:    move $a3, $s0
+; LA32-NEXT:    b .LBB31_1
+; LA32-NEXT:  .LBB31_7: # %atomicrmw.end
+; LA32-NEXT:    move $a0, $a4
+; LA32-NEXT:    move $a1, $a5
+; LA32-NEXT:    ld.w $s2, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s1, $sp, 16 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_min_i64_release:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammin_db.d $a2, $a1, $a0
@@ -657,6 +1707,31 @@ define i64 @atomicrmw_min_i64_release(ptr %a, i64 %b) nounwind {
 }
 
 define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_umax_i8_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    ori $a3, $zero, 255
+; LA32-NEXT:    sll.w $a3, $a3, $a0
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:  .LBB32_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a2, 0
+; LA32-NEXT:    and $a6, $a4, $a3
+; LA32-NEXT:    move $a5, $a4
+; LA32-NEXT:    bgeu $a6, $a1, .LBB32_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB32_1 Depth=1
+; LA32-NEXT:    xor $a5, $a4, $a1
+; LA32-NEXT:    and $a5, $a5, $a3
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:  .LBB32_3: # in Loop: Header=BB32_1 Depth=1
+; LA32-NEXT:    sc.w $a5, $a2, 0
+; LA32-NEXT:    beq $a5, $zero, .LBB32_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a4, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umax_i8_acq_rel:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -685,6 +1760,32 @@ define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
 }
 
 define i16 @atomicrmw_umax_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_umax_i16_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    lu12i.w $a3, 15
+; LA32-NEXT:    ori $a3, $a3, 4095
+; LA32-NEXT:    sll.w $a4, $a3, $a0
+; LA32-NEXT:    and $a1, $a1, $a3
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:  .LBB33_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a2, 0
+; LA32-NEXT:    and $a6, $a3, $a4
+; LA32-NEXT:    move $a5, $a3
+; LA32-NEXT:    bgeu $a6, $a1, .LBB33_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB33_1 Depth=1
+; LA32-NEXT:    xor $a5, $a3, $a1
+; LA32-NEXT:    and $a5, $a5, $a4
+; LA32-NEXT:    xor $a5, $a3, $a5
+; LA32-NEXT:  .LBB33_3: # in Loop: Header=BB33_1 Depth=1
+; LA32-NEXT:    sc.w $a5, $a2, 0
+; LA32-NEXT:    beq $a5, $zero, .LBB33_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a3, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umax_i16_acq_rel:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -714,6 +1815,21 @@ define i16 @atomicrmw_umax_i16_acq_rel(ptr %a, i16 %b) nounwind {
 }
 
 define i32 @atomicrmw_umax_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_umax_i32_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB34_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    move $a3, $a2
+; LA32-NEXT:    bgeu $a2, $a1, .LBB34_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB34_1 Depth=1
+; LA32-NEXT:    move $a3, $a1
+; LA32-NEXT:  .LBB34_3: # in Loop: Header=BB34_1 Depth=1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beq $a3, $zero, .LBB34_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umax_i32_acq_rel:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammax_db.wu $a2, $a1, $a0
@@ -724,6 +1840,65 @@ define i32 @atomicrmw_umax_i32_acq_rel(ptr %a, i32 %b) nounwind {
 }
 
 define i64 @atomicrmw_umax_i64_acq_rel(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_umax_i64_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 24 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s0, $sp, 20 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s1, $sp, 16 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s2, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    move $fp, $a0
+; LA32-NEXT:    ld.w $a5, $a0, 4
+; LA32-NEXT:    ld.w $a4, $a0, 0
+; LA32-NEXT:    move $s0, $a2
+; LA32-NEXT:    move $s1, $a1
+; LA32-NEXT:    addi.w $s2, $sp, 0
+; LA32-NEXT:    b .LBB35_2
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB35_1: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB35_2 Depth=1
+; LA32-NEXT:    st.w $a4, $sp, 0
+; LA32-NEXT:    st.w $a5, $sp, 4
+; LA32-NEXT:    ori $a4, $zero, 4
+; LA32-NEXT:    ori $a5, $zero, 2
+; LA32-NEXT:    move $a0, $fp
+; LA32-NEXT:    move $a1, $s2
+; LA32-NEXT:    bl __atomic_compare_exchange_8
+; LA32-NEXT:    ld.w $a5, $sp, 4
+; LA32-NEXT:    ld.w $a4, $sp, 0
+; LA32-NEXT:    bne $a0, $zero, .LBB35_7
+; LA32-NEXT:  .LBB35_2: # %atomicrmw.start
+; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    beq $a5, $s0, .LBB35_4
+; LA32-NEXT:  # %bb.3: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB35_2 Depth=1
+; LA32-NEXT:    sltu $a0, $s0, $a5
+; LA32-NEXT:    b .LBB35_5
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB35_4: # in Loop: Header=BB35_2 Depth=1
+; LA32-NEXT:    sltu $a0, $s1, $a4
+; LA32-NEXT:  .LBB35_5: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB35_2 Depth=1
+; LA32-NEXT:    move $a2, $a4
+; LA32-NEXT:    move $a3, $a5
+; LA32-NEXT:    bne $a0, $zero, .LBB35_1
+; LA32-NEXT:  # %bb.6: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB35_2 Depth=1
+; LA32-NEXT:    move $a2, $s1
+; LA32-NEXT:    move $a3, $s0
+; LA32-NEXT:    b .LBB35_1
+; LA32-NEXT:  .LBB35_7: # %atomicrmw.end
+; LA32-NEXT:    move $a0, $a4
+; LA32-NEXT:    move $a1, $a5
+; LA32-NEXT:    ld.w $s2, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s1, $sp, 16 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umax_i64_acq_rel:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammax_db.du $a2, $a1, $a0
@@ -734,6 +1909,31 @@ define i64 @atomicrmw_umax_i64_acq_rel(ptr %a, i64 %b) nounwind {
 }
 
 define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_umin_i8_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    ori $a3, $zero, 255
+; LA32-NEXT:    sll.w $a3, $a3, $a0
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:  .LBB36_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a2, 0
+; LA32-NEXT:    and $a6, $a4, $a3
+; LA32-NEXT:    move $a5, $a4
+; LA32-NEXT:    bgeu $a1, $a6, .LBB36_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB36_1 Depth=1
+; LA32-NEXT:    xor $a5, $a4, $a1
+; LA32-NEXT:    and $a5, $a5, $a3
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:  .LBB36_3: # in Loop: Header=BB36_1 Depth=1
+; LA32-NEXT:    sc.w $a5, $a2, 0
+; LA32-NEXT:    beq $a5, $zero, .LBB36_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a4, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umin_i8_acq_rel:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -762,6 +1962,32 @@ define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
 }
 
 define i16 @atomicrmw_umin_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_umin_i16_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    lu12i.w $a3, 15
+; LA32-NEXT:    ori $a3, $a3, 4095
+; LA32-NEXT:    sll.w $a4, $a3, $a0
+; LA32-NEXT:    and $a1, $a1, $a3
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:  .LBB37_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a2, 0
+; LA32-NEXT:    and $a6, $a3, $a4
+; LA32-NEXT:    move $a5, $a3
+; LA32-NEXT:    bgeu $a1, $a6, .LBB37_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB37_1 Depth=1
+; LA32-NEXT:    xor $a5, $a3, $a1
+; LA32-NEXT:    and $a5, $a5, $a4
+; LA32-NEXT:    xor $a5, $a3, $a5
+; LA32-NEXT:  .LBB37_3: # in Loop: Header=BB37_1 Depth=1
+; LA32-NEXT:    sc.w $a5, $a2, 0
+; LA32-NEXT:    beq $a5, $zero, .LBB37_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a3, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umin_i16_acq_rel:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -791,6 +2017,21 @@ define i16 @atomicrmw_umin_i16_acq_rel(ptr %a, i16 %b) nounwind {
 }
 
 define i32 @atomicrmw_umin_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_umin_i32_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB38_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    move $a3, $a2
+; LA32-NEXT:    bgeu $a1, $a2, .LBB38_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB38_1 Depth=1
+; LA32-NEXT:    move $a3, $a1
+; LA32-NEXT:  .LBB38_3: # in Loop: Header=BB38_1 Depth=1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beq $a3, $zero, .LBB38_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umin_i32_acq_rel:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammin_db.wu $a2, $a1, $a0
@@ -801,6 +2042,66 @@ define i32 @atomicrmw_umin_i32_acq_rel(ptr %a, i32 %b) nounwind {
 }
 
 define i64 @atomicrmw_umin_i64_acq_rel(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_umin_i64_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 24 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s0, $sp, 20 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s1, $sp, 16 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s2, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    move $fp, $a0
+; LA32-NEXT:    ld.w $a5, $a0, 4
+; LA32-NEXT:    ld.w $a4, $a0, 0
+; LA32-NEXT:    move $s0, $a2
+; LA32-NEXT:    move $s1, $a1
+; LA32-NEXT:    addi.w $s2, $sp, 0
+; LA32-NEXT:    b .LBB39_2
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB39_1: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB39_2 Depth=1
+; LA32-NEXT:    st.w $a4, $sp, 0
+; LA32-NEXT:    st.w $a5, $sp, 4
+; LA32-NEXT:    ori $a4, $zero, 4
+; LA32-NEXT:    ori $a5, $zero, 2
+; LA32-NEXT:    move $a0, $fp
+; LA32-NEXT:    move $a1, $s2
+; LA32-NEXT:    bl __atomic_compare_exchange_8
+; LA32-NEXT:    ld.w $a5, $sp, 4
+; LA32-NEXT:    ld.w $a4, $sp, 0
+; LA32-NEXT:    bne $a0, $zero, .LBB39_7
+; LA32-NEXT:  .LBB39_2: # %atomicrmw.start
+; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    beq $a5, $s0, .LBB39_4
+; LA32-NEXT:  # %bb.3: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB39_2 Depth=1
+; LA32-NEXT:    sltu $a0, $s0, $a5
+; LA32-NEXT:    b .LBB39_5
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB39_4: # in Loop: Header=BB39_2 Depth=1
+; LA32-NEXT:    sltu $a0, $s1, $a4
+; LA32-NEXT:  .LBB39_5: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB39_2 Depth=1
+; LA32-NEXT:    xori $a0, $a0, 1
+; LA32-NEXT:    move $a2, $a4
+; LA32-NEXT:    move $a3, $a5
+; LA32-NEXT:    bne $a0, $zero, .LBB39_1
+; LA32-NEXT:  # %bb.6: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB39_2 Depth=1
+; LA32-NEXT:    move $a2, $s1
+; LA32-NEXT:    move $a3, $s0
+; LA32-NEXT:    b .LBB39_1
+; LA32-NEXT:  .LBB39_7: # %atomicrmw.end
+; LA32-NEXT:    move $a0, $a4
+; LA32-NEXT:    move $a1, $a5
+; LA32-NEXT:    ld.w $s2, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s1, $sp, 16 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umin_i64_acq_rel:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammin_db.du $a2, $a1, $a0
@@ -811,6 +2112,36 @@ define i64 @atomicrmw_umin_i64_acq_rel(ptr %a, i64 %b) nounwind {
 }
 
 define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_max_i8_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    andi $a3, $a0, 24
+; LA32-NEXT:    ori $a4, $zero, 255
+; LA32-NEXT:    sll.w $a4, $a4, $a0
+; LA32-NEXT:    slli.w $a1, $a1, 24
+; LA32-NEXT:    srai.w $a1, $a1, 24
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:    xori $a3, $a3, 24
+; LA32-NEXT:  .LBB40_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a5, $a2, 0
+; LA32-NEXT:    and $a7, $a5, $a4
+; LA32-NEXT:    move $a6, $a5
+; LA32-NEXT:    sll.w $a7, $a7, $a3
+; LA32-NEXT:    sra.w $a7, $a7, $a3
+; LA32-NEXT:    bge $a7, $a1, .LBB40_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB40_1 Depth=1
+; LA32-NEXT:    xor $a6, $a5, $a1
+; LA32-NEXT:    and $a6, $a6, $a4
+; LA32-NEXT:    xor $a6, $a5, $a6
+; LA32-NEXT:  .LBB40_3: # in Loop: Header=BB40_1 Depth=1
+; LA32-NEXT:    sc.w $a6, $a2, 0
+; LA32-NEXT:    beq $a6, $zero, .LBB40_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a5, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_max_i8_acq_rel:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -843,6 +2174,38 @@ define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind {
 }
 
 define i16 @atomicrmw_max_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_max_i16_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    andi $a3, $a0, 24
+; LA32-NEXT:    lu12i.w $a4, 15
+; LA32-NEXT:    ori $a4, $a4, 4095
+; LA32-NEXT:    sll.w $a4, $a4, $a0
+; LA32-NEXT:    slli.w $a1, $a1, 16
+; LA32-NEXT:    srai.w $a1, $a1, 16
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:    ori $a5, $zero, 16
+; LA32-NEXT:    sub.w $a3, $a5, $a3
+; LA32-NEXT:  .LBB41_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a5, $a2, 0
+; LA32-NEXT:    and $a7, $a5, $a4
+; LA32-NEXT:    move $a6, $a5
+; LA32-NEXT:    sll.w $a7, $a7, $a3
+; LA32-NEXT:    sra.w $a7, $a7, $a3
+; LA32-NEXT:    bge $a7, $a1, .LBB41_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB41_1 Depth=1
+; LA32-NEXT:    xor $a6, $a5, $a1
+; LA32-NEXT:    and $a6, $a6, $a4
+; LA32-NEXT:    xor $a6, $a5, $a6
+; LA32-NEXT:  .LBB41_3: # in Loop: Header=BB41_1 Depth=1
+; LA32-NEXT:    sc.w $a6, $a2, 0
+; LA32-NEXT:    beq $a6, $zero, .LBB41_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a5, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_max_i16_acq_rel:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -877,6 +2240,21 @@ define i16 @atomicrmw_max_i16_acq_rel(ptr %a, i16 %b) nounwind {
 }
 
 define i32 @atomicrmw_max_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_max_i32_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB42_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    move $a3, $a2
+; LA32-NEXT:    bge $a2, $a1, .LBB42_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB42_1 Depth=1
+; LA32-NEXT:    move $a3, $a1
+; LA32-NEXT:  .LBB42_3: # in Loop: Header=BB42_1 Depth=1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beq $a3, $zero, .LBB42_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_max_i32_acq_rel:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammax_db.w $a2, $a1, $a0
@@ -887,6 +2265,65 @@ define i32 @atomicrmw_max_i32_acq_rel(ptr %a, i32 %b) nounwind {
 }
 
 define i64 @atomicrmw_max_i64_acq_rel(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_max_i64_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 24 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s0, $sp, 20 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s1, $sp, 16 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s2, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    move $fp, $a0
+; LA32-NEXT:    ld.w $a5, $a0, 4
+; LA32-NEXT:    ld.w $a4, $a0, 0
+; LA32-NEXT:    move $s0, $a2
+; LA32-NEXT:    move $s1, $a1
+; LA32-NEXT:    addi.w $s2, $sp, 0
+; LA32-NEXT:    b .LBB43_2
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB43_1: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB43_2 Depth=1
+; LA32-NEXT:    st.w $a4, $sp, 0
+; LA32-NEXT:    st.w $a5, $sp, 4
+; LA32-NEXT:    ori $a4, $zero, 4
+; LA32-NEXT:    ori $a5, $zero, 2
+; LA32-NEXT:    move $a0, $fp
+; LA32-NEXT:    move $a1, $s2
+; LA32-NEXT:    bl __atomic_compare_exchange_8
+; LA32-NEXT:    ld.w $a5, $sp, 4
+; LA32-NEXT:    ld.w $a4, $sp, 0
+; LA32-NEXT:    bne $a0, $zero, .LBB43_7
+; LA32-NEXT:  .LBB43_2: # %atomicrmw.start
+; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    beq $a5, $s0, .LBB43_4
+; LA32-NEXT:  # %bb.3: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB43_2 Depth=1
+; LA32-NEXT:    slt $a0, $s0, $a5
+; LA32-NEXT:    b .LBB43_5
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB43_4: # in Loop: Header=BB43_2 Depth=1
+; LA32-NEXT:    sltu $a0, $s1, $a4
+; LA32-NEXT:  .LBB43_5: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB43_2 Depth=1
+; LA32-NEXT:    move $a2, $a4
+; LA32-NEXT:    move $a3, $a5
+; LA32-NEXT:    bne $a0, $zero, .LBB43_1
+; LA32-NEXT:  # %bb.6: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB43_2 Depth=1
+; LA32-NEXT:    move $a2, $s1
+; LA32-NEXT:    move $a3, $s0
+; LA32-NEXT:    b .LBB43_1
+; LA32-NEXT:  .LBB43_7: # %atomicrmw.end
+; LA32-NEXT:    move $a0, $a4
+; LA32-NEXT:    move $a1, $a5
+; LA32-NEXT:    ld.w $s2, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s1, $sp, 16 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_max_i64_acq_rel:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammax_db.d $a2, $a1, $a0
@@ -897,6 +2334,36 @@ define i64 @atomicrmw_max_i64_acq_rel(ptr %a, i64 %b) nounwind {
 }
 
 define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_min_i8_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    andi $a3, $a0, 24
+; LA32-NEXT:    ori $a4, $zero, 255
+; LA32-NEXT:    sll.w $a4, $a4, $a0
+; LA32-NEXT:    slli.w $a1, $a1, 24
+; LA32-NEXT:    srai.w $a1, $a1, 24
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:    xori $a3, $a3, 24
+; LA32-NEXT:  .LBB44_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a5, $a2, 0
+; LA32-NEXT:    and $a7, $a5, $a4
+; LA32-NEXT:    move $a6, $a5
+; LA32-NEXT:    sll.w $a7, $a7, $a3
+; LA32-NEXT:    sra.w $a7, $a7, $a3
+; LA32-NEXT:    bge $a1, $a7, .LBB44_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB44_1 Depth=1
+; LA32-NEXT:    xor $a6, $a5, $a1
+; LA32-NEXT:    and $a6, $a6, $a4
+; LA32-NEXT:    xor $a6, $a5, $a6
+; LA32-NEXT:  .LBB44_3: # in Loop: Header=BB44_1 Depth=1
+; LA32-NEXT:    sc.w $a6, $a2, 0
+; LA32-NEXT:    beq $a6, $zero, .LBB44_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a5, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_min_i8_acq_rel:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -929,6 +2396,38 @@ define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind {
 }
 
 define i16 @atomicrmw_min_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_min_i16_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    andi $a3, $a0, 24
+; LA32-NEXT:    lu12i.w $a4, 15
+; LA32-NEXT:    ori $a4, $a4, 4095
+; LA32-NEXT:    sll.w $a4, $a4, $a0
+; LA32-NEXT:    slli.w $a1, $a1, 16
+; LA32-NEXT:    srai.w $a1, $a1, 16
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:    ori $a5, $zero, 16
+; LA32-NEXT:    sub.w $a3, $a5, $a3
+; LA32-NEXT:  .LBB45_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a5, $a2, 0
+; LA32-NEXT:    and $a7, $a5, $a4
+; LA32-NEXT:    move $a6, $a5
+; LA32-NEXT:    sll.w $a7, $a7, $a3
+; LA32-NEXT:    sra.w $a7, $a7, $a3
+; LA32-NEXT:    bge $a1, $a7, .LBB45_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB45_1 Depth=1
+; LA32-NEXT:    xor $a6, $a5, $a1
+; LA32-NEXT:    and $a6, $a6, $a4
+; LA32-NEXT:    xor $a6, $a5, $a6
+; LA32-NEXT:  .LBB45_3: # in Loop: Header=BB45_1 Depth=1
+; LA32-NEXT:    sc.w $a6, $a2, 0
+; LA32-NEXT:    beq $a6, $zero, .LBB45_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a5, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_min_i16_acq_rel:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -963,6 +2462,21 @@ define i16 @atomicrmw_min_i16_acq_rel(ptr %a, i16 %b) nounwind {
 }
 
 define i32 @atomicrmw_min_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_min_i32_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB46_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    move $a3, $a2
+; LA32-NEXT:    bge $a1, $a2, .LBB46_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB46_1 Depth=1
+; LA32-NEXT:    move $a3, $a1
+; LA32-NEXT:  .LBB46_3: # in Loop: Header=BB46_1 Depth=1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beq $a3, $zero, .LBB46_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_min_i32_acq_rel:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammin_db.w $a2, $a1, $a0
@@ -973,6 +2487,66 @@ define i32 @atomicrmw_min_i32_acq_rel(ptr %a, i32 %b) nounwind {
 }
 
 define i64 @atomicrmw_min_i64_acq_rel(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_min_i64_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 24 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s0, $sp, 20 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s1, $sp, 16 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s2, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    move $fp, $a0
+; LA32-NEXT:    ld.w $a5, $a0, 4
+; LA32-NEXT:    ld.w $a4, $a0, 0
+; LA32-NEXT:    move $s0, $a2
+; LA32-NEXT:    move $s1, $a1
+; LA32-NEXT:    addi.w $s2, $sp, 0
+; LA32-NEXT:    b .LBB47_2
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB47_1: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB47_2 Depth=1
+; LA32-NEXT:    st.w $a4, $sp, 0
+; LA32-NEXT:    st.w $a5, $sp, 4
+; LA32-NEXT:    ori $a4, $zero, 4
+; LA32-NEXT:    ori $a5, $zero, 2
+; LA32-NEXT:    move $a0, $fp
+; LA32-NEXT:    move $a1, $s2
+; LA32-NEXT:    bl __atomic_compare_exchange_8
+; LA32-NEXT:    ld.w $a5, $sp, 4
+; LA32-NEXT:    ld.w $a4, $sp, 0
+; LA32-NEXT:    bne $a0, $zero, .LBB47_7
+; LA32-NEXT:  .LBB47_2: # %atomicrmw.start
+; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    beq $a5, $s0, .LBB47_4
+; LA32-NEXT:  # %bb.3: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB47_2 Depth=1
+; LA32-NEXT:    slt $a0, $s0, $a5
+; LA32-NEXT:    b .LBB47_5
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB47_4: # in Loop: Header=BB47_2 Depth=1
+; LA32-NEXT:    sltu $a0, $s1, $a4
+; LA32-NEXT:  .LBB47_5: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB47_2 Depth=1
+; LA32-NEXT:    xori $a0, $a0, 1
+; LA32-NEXT:    move $a2, $a4
+; LA32-NEXT:    move $a3, $a5
+; LA32-NEXT:    bne $a0, $zero, .LBB47_1
+; LA32-NEXT:  # %bb.6: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB47_2 Depth=1
+; LA32-NEXT:    move $a2, $s1
+; LA32-NEXT:    move $a3, $s0
+; LA32-NEXT:    b .LBB47_1
+; LA32-NEXT:  .LBB47_7: # %atomicrmw.end
+; LA32-NEXT:    move $a0, $a4
+; LA32-NEXT:    move $a1, $a5
+; LA32-NEXT:    ld.w $s2, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s1, $sp, 16 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_min_i64_acq_rel:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammin_db.d $a2, $a1, $a0
@@ -983,6 +2557,31 @@ define i64 @atomicrmw_min_i64_acq_rel(ptr %a, i64 %b) nounwind {
 }
 
 define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_umax_i8_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    ori $a3, $zero, 255
+; LA32-NEXT:    sll.w $a3, $a3, $a0
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:  .LBB48_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a2, 0
+; LA32-NEXT:    and $a6, $a4, $a3
+; LA32-NEXT:    move $a5, $a4
+; LA32-NEXT:    bgeu $a6, $a1, .LBB48_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB48_1 Depth=1
+; LA32-NEXT:    xor $a5, $a4, $a1
+; LA32-NEXT:    and $a5, $a5, $a3
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:  .LBB48_3: # in Loop: Header=BB48_1 Depth=1
+; LA32-NEXT:    sc.w $a5, $a2, 0
+; LA32-NEXT:    beq $a5, $zero, .LBB48_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a4, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umax_i8_seq_cst:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -1011,6 +2610,32 @@ define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
 }
 
 define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_umax_i16_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    lu12i.w $a3, 15
+; LA32-NEXT:    ori $a3, $a3, 4095
+; LA32-NEXT:    sll.w $a4, $a3, $a0
+; LA32-NEXT:    and $a1, $a1, $a3
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:  .LBB49_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a2, 0
+; LA32-NEXT:    and $a6, $a3, $a4
+; LA32-NEXT:    move $a5, $a3
+; LA32-NEXT:    bgeu $a6, $a1, .LBB49_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB49_1 Depth=1
+; LA32-NEXT:    xor $a5, $a3, $a1
+; LA32-NEXT:    and $a5, $a5, $a4
+; LA32-NEXT:    xor $a5, $a3, $a5
+; LA32-NEXT:  .LBB49_3: # in Loop: Header=BB49_1 Depth=1
+; LA32-NEXT:    sc.w $a5, $a2, 0
+; LA32-NEXT:    beq $a5, $zero, .LBB49_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a3, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umax_i16_seq_cst:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -1040,6 +2665,21 @@ define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind {
 }
 
 define i32 @atomicrmw_umax_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_umax_i32_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB50_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    move $a3, $a2
+; LA32-NEXT:    bgeu $a2, $a1, .LBB50_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB50_1 Depth=1
+; LA32-NEXT:    move $a3, $a1
+; LA32-NEXT:  .LBB50_3: # in Loop: Header=BB50_1 Depth=1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beq $a3, $zero, .LBB50_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umax_i32_seq_cst:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammax_db.wu $a2, $a1, $a0
@@ -1050,6 +2690,65 @@ define i32 @atomicrmw_umax_i32_seq_cst(ptr %a, i32 %b) nounwind {
 }
 
 define i64 @atomicrmw_umax_i64_seq_cst(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_umax_i64_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 24 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s0, $sp, 20 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s1, $sp, 16 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s2, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    move $fp, $a0
+; LA32-NEXT:    ld.w $a5, $a0, 4
+; LA32-NEXT:    ld.w $a4, $a0, 0
+; LA32-NEXT:    move $s0, $a2
+; LA32-NEXT:    move $s1, $a1
+; LA32-NEXT:    addi.w $s2, $sp, 0
+; LA32-NEXT:    b .LBB51_2
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB51_1: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB51_2 Depth=1
+; LA32-NEXT:    st.w $a4, $sp, 0
+; LA32-NEXT:    st.w $a5, $sp, 4
+; LA32-NEXT:    ori $a4, $zero, 5
+; LA32-NEXT:    ori $a5, $zero, 5
+; LA32-NEXT:    move $a0, $fp
+; LA32-NEXT:    move $a1, $s2
+; LA32-NEXT:    bl __atomic_compare_exchange_8
+; LA32-NEXT:    ld.w $a5, $sp, 4
+; LA32-NEXT:    ld.w $a4, $sp, 0
+; LA32-NEXT:    bne $a0, $zero, .LBB51_7
+; LA32-NEXT:  .LBB51_2: # %atomicrmw.start
+; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    beq $a5, $s0, .LBB51_4
+; LA32-NEXT:  # %bb.3: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB51_2 Depth=1
+; LA32-NEXT:    sltu $a0, $s0, $a5
+; LA32-NEXT:    b .LBB51_5
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB51_4: # in Loop: Header=BB51_2 Depth=1
+; LA32-NEXT:    sltu $a0, $s1, $a4
+; LA32-NEXT:  .LBB51_5: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB51_2 Depth=1
+; LA32-NEXT:    move $a2, $a4
+; LA32-NEXT:    move $a3, $a5
+; LA32-NEXT:    bne $a0, $zero, .LBB51_1
+; LA32-NEXT:  # %bb.6: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB51_2 Depth=1
+; LA32-NEXT:    move $a2, $s1
+; LA32-NEXT:    move $a3, $s0
+; LA32-NEXT:    b .LBB51_1
+; LA32-NEXT:  .LBB51_7: # %atomicrmw.end
+; LA32-NEXT:    move $a0, $a4
+; LA32-NEXT:    move $a1, $a5
+; LA32-NEXT:    ld.w $s2, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s1, $sp, 16 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umax_i64_seq_cst:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammax_db.du $a2, $a1, $a0
@@ -1060,6 +2759,31 @@ define i64 @atomicrmw_umax_i64_seq_cst(ptr %a, i64 %b) nounwind {
 }
 
 define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_umin_i8_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    ori $a3, $zero, 255
+; LA32-NEXT:    sll.w $a3, $a3, $a0
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:  .LBB52_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a2, 0
+; LA32-NEXT:    and $a6, $a4, $a3
+; LA32-NEXT:    move $a5, $a4
+; LA32-NEXT:    bgeu $a1, $a6, .LBB52_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB52_1 Depth=1
+; LA32-NEXT:    xor $a5, $a4, $a1
+; LA32-NEXT:    and $a5, $a5, $a3
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:  .LBB52_3: # in Loop: Header=BB52_1 Depth=1
+; LA32-NEXT:    sc.w $a5, $a2, 0
+; LA32-NEXT:    beq $a5, $zero, .LBB52_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a4, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umin_i8_seq_cst:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -1088,6 +2812,32 @@ define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
 }
 
 define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_umin_i16_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    lu12i.w $a3, 15
+; LA32-NEXT:    ori $a3, $a3, 4095
+; LA32-NEXT:    sll.w $a4, $a3, $a0
+; LA32-NEXT:    and $a1, $a1, $a3
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:  .LBB53_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a2, 0
+; LA32-NEXT:    and $a6, $a3, $a4
+; LA32-NEXT:    move $a5, $a3
+; LA32-NEXT:    bgeu $a1, $a6, .LBB53_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB53_1 Depth=1
+; LA32-NEXT:    xor $a5, $a3, $a1
+; LA32-NEXT:    and $a5, $a5, $a4
+; LA32-NEXT:    xor $a5, $a3, $a5
+; LA32-NEXT:  .LBB53_3: # in Loop: Header=BB53_1 Depth=1
+; LA32-NEXT:    sc.w $a5, $a2, 0
+; LA32-NEXT:    beq $a5, $zero, .LBB53_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a3, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umin_i16_seq_cst:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -1117,6 +2867,21 @@ define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind {
 }
 
 define i32 @atomicrmw_umin_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_umin_i32_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB54_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    move $a3, $a2
+; LA32-NEXT:    bgeu $a1, $a2, .LBB54_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB54_1 Depth=1
+; LA32-NEXT:    move $a3, $a1
+; LA32-NEXT:  .LBB54_3: # in Loop: Header=BB54_1 Depth=1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beq $a3, $zero, .LBB54_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umin_i32_seq_cst:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammin_db.wu $a2, $a1, $a0
@@ -1127,6 +2892,66 @@ define i32 @atomicrmw_umin_i32_seq_cst(ptr %a, i32 %b) nounwind {
 }
 
 define i64 @atomicrmw_umin_i64_seq_cst(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_umin_i64_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 24 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s0, $sp, 20 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s1, $sp, 16 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s2, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    move $fp, $a0
+; LA32-NEXT:    ld.w $a5, $a0, 4
+; LA32-NEXT:    ld.w $a4, $a0, 0
+; LA32-NEXT:    move $s0, $a2
+; LA32-NEXT:    move $s1, $a1
+; LA32-NEXT:    addi.w $s2, $sp, 0
+; LA32-NEXT:    b .LBB55_2
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB55_1: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB55_2 Depth=1
+; LA32-NEXT:    st.w $a4, $sp, 0
+; LA32-NEXT:    st.w $a5, $sp, 4
+; LA32-NEXT:    ori $a4, $zero, 5
+; LA32-NEXT:    ori $a5, $zero, 5
+; LA32-NEXT:    move $a0, $fp
+; LA32-NEXT:    move $a1, $s2
+; LA32-NEXT:    bl __atomic_compare_exchange_8
+; LA32-NEXT:    ld.w $a5, $sp, 4
+; LA32-NEXT:    ld.w $a4, $sp, 0
+; LA32-NEXT:    bne $a0, $zero, .LBB55_7
+; LA32-NEXT:  .LBB55_2: # %atomicrmw.start
+; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    beq $a5, $s0, .LBB55_4
+; LA32-NEXT:  # %bb.3: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB55_2 Depth=1
+; LA32-NEXT:    sltu $a0, $s0, $a5
+; LA32-NEXT:    b .LBB55_5
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB55_4: # in Loop: Header=BB55_2 Depth=1
+; LA32-NEXT:    sltu $a0, $s1, $a4
+; LA32-NEXT:  .LBB55_5: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB55_2 Depth=1
+; LA32-NEXT:    xori $a0, $a0, 1
+; LA32-NEXT:    move $a2, $a4
+; LA32-NEXT:    move $a3, $a5
+; LA32-NEXT:    bne $a0, $zero, .LBB55_1
+; LA32-NEXT:  # %bb.6: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB55_2 Depth=1
+; LA32-NEXT:    move $a2, $s1
+; LA32-NEXT:    move $a3, $s0
+; LA32-NEXT:    b .LBB55_1
+; LA32-NEXT:  .LBB55_7: # %atomicrmw.end
+; LA32-NEXT:    move $a0, $a4
+; LA32-NEXT:    move $a1, $a5
+; LA32-NEXT:    ld.w $s2, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s1, $sp, 16 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umin_i64_seq_cst:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammin_db.du $a2, $a1, $a0
@@ -1137,6 +2962,36 @@ define i64 @atomicrmw_umin_i64_seq_cst(ptr %a, i64 %b) nounwind {
 }
 
 define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_max_i8_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    andi $a3, $a0, 24
+; LA32-NEXT:    ori $a4, $zero, 255
+; LA32-NEXT:    sll.w $a4, $a4, $a0
+; LA32-NEXT:    slli.w $a1, $a1, 24
+; LA32-NEXT:    srai.w $a1, $a1, 24
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:    xori $a3, $a3, 24
+; LA32-NEXT:  .LBB56_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a5, $a2, 0
+; LA32-NEXT:    and $a7, $a5, $a4
+; LA32-NEXT:    move $a6, $a5
+; LA32-NEXT:    sll.w $a7, $a7, $a3
+; LA32-NEXT:    sra.w $a7, $a7, $a3
+; LA32-NEXT:    bge $a7, $a1, .LBB56_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB56_1 Depth=1
+; LA32-NEXT:    xor $a6, $a5, $a1
+; LA32-NEXT:    and $a6, $a6, $a4
+; LA32-NEXT:    xor $a6, $a5, $a6
+; LA32-NEXT:  .LBB56_3: # in Loop: Header=BB56_1 Depth=1
+; LA32-NEXT:    sc.w $a6, $a2, 0
+; LA32-NEXT:    beq $a6, $zero, .LBB56_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a5, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_max_i8_seq_cst:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -1169,6 +3024,38 @@ define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind {
 }
 
 define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_max_i16_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    andi $a3, $a0, 24
+; LA32-NEXT:    lu12i.w $a4, 15
+; LA32-NEXT:    ori $a4, $a4, 4095
+; LA32-NEXT:    sll.w $a4, $a4, $a0
+; LA32-NEXT:    slli.w $a1, $a1, 16
+; LA32-NEXT:    srai.w $a1, $a1, 16
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:    ori $a5, $zero, 16
+; LA32-NEXT:    sub.w $a3, $a5, $a3
+; LA32-NEXT:  .LBB57_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a5, $a2, 0
+; LA32-NEXT:    and $a7, $a5, $a4
+; LA32-NEXT:    move $a6, $a5
+; LA32-NEXT:    sll.w $a7, $a7, $a3
+; LA32-NEXT:    sra.w $a7, $a7, $a3
+; LA32-NEXT:    bge $a7, $a1, .LBB57_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB57_1 Depth=1
+; LA32-NEXT:    xor $a6, $a5, $a1
+; LA32-NEXT:    and $a6, $a6, $a4
+; LA32-NEXT:    xor $a6, $a5, $a6
+; LA32-NEXT:  .LBB57_3: # in Loop: Header=BB57_1 Depth=1
+; LA32-NEXT:    sc.w $a6, $a2, 0
+; LA32-NEXT:    beq $a6, $zero, .LBB57_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a5, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_max_i16_seq_cst:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -1203,6 +3090,21 @@ define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind {
 }
 
 define i32 @atomicrmw_max_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_max_i32_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB58_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    move $a3, $a2
+; LA32-NEXT:    bge $a2, $a1, .LBB58_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB58_1 Depth=1
+; LA32-NEXT:    move $a3, $a1
+; LA32-NEXT:  .LBB58_3: # in Loop: Header=BB58_1 Depth=1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beq $a3, $zero, .LBB58_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_max_i32_seq_cst:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammax_db.w $a2, $a1, $a0
@@ -1213,6 +3115,65 @@ define i32 @atomicrmw_max_i32_seq_cst(ptr %a, i32 %b) nounwind {
 }
 
 define i64 @atomicrmw_max_i64_seq_cst(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_max_i64_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 24 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s0, $sp, 20 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s1, $sp, 16 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s2, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    move $fp, $a0
+; LA32-NEXT:    ld.w $a5, $a0, 4
+; LA32-NEXT:    ld.w $a4, $a0, 0
+; LA32-NEXT:    move $s0, $a2
+; LA32-NEXT:    move $s1, $a1
+; LA32-NEXT:    addi.w $s2, $sp, 0
+; LA32-NEXT:    b .LBB59_2
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB59_1: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB59_2 Depth=1
+; LA32-NEXT:    st.w $a4, $sp, 0
+; LA32-NEXT:    st.w $a5, $sp, 4
+; LA32-NEXT:    ori $a4, $zero, 5
+; LA32-NEXT:    ori $a5, $zero, 5
+; LA32-NEXT:    move $a0, $fp
+; LA32-NEXT:    move $a1, $s2
+; LA32-NEXT:    bl __atomic_compare_exchange_8
+; LA32-NEXT:    ld.w $a5, $sp, 4
+; LA32-NEXT:    ld.w $a4, $sp, 0
+; LA32-NEXT:    bne $a0, $zero, .LBB59_7
+; LA32-NEXT:  .LBB59_2: # %atomicrmw.start
+; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    beq $a5, $s0, .LBB59_4
+; LA32-NEXT:  # %bb.3: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB59_2 Depth=1
+; LA32-NEXT:    slt $a0, $s0, $a5
+; LA32-NEXT:    b .LBB59_5
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB59_4: # in Loop: Header=BB59_2 Depth=1
+; LA32-NEXT:    sltu $a0, $s1, $a4
+; LA32-NEXT:  .LBB59_5: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB59_2 Depth=1
+; LA32-NEXT:    move $a2, $a4
+; LA32-NEXT:    move $a3, $a5
+; LA32-NEXT:    bne $a0, $zero, .LBB59_1
+; LA32-NEXT:  # %bb.6: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB59_2 Depth=1
+; LA32-NEXT:    move $a2, $s1
+; LA32-NEXT:    move $a3, $s0
+; LA32-NEXT:    b .LBB59_1
+; LA32-NEXT:  .LBB59_7: # %atomicrmw.end
+; LA32-NEXT:    move $a0, $a4
+; LA32-NEXT:    move $a1, $a5
+; LA32-NEXT:    ld.w $s2, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s1, $sp, 16 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_max_i64_seq_cst:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammax_db.d $a2, $a1, $a0
@@ -1223,6 +3184,36 @@ define i64 @atomicrmw_max_i64_seq_cst(ptr %a, i64 %b) nounwind {
 }
 
 define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_min_i8_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    andi $a3, $a0, 24
+; LA32-NEXT:    ori $a4, $zero, 255
+; LA32-NEXT:    sll.w $a4, $a4, $a0
+; LA32-NEXT:    slli.w $a1, $a1, 24
+; LA32-NEXT:    srai.w $a1, $a1, 24
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:    xori $a3, $a3, 24
+; LA32-NEXT:  .LBB60_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a5, $a2, 0
+; LA32-NEXT:    and $a7, $a5, $a4
+; LA32-NEXT:    move $a6, $a5
+; LA32-NEXT:    sll.w $a7, $a7, $a3
+; LA32-NEXT:    sra.w $a7, $a7, $a3
+; LA32-NEXT:    bge $a1, $a7, .LBB60_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB60_1 Depth=1
+; LA32-NEXT:    xor $a6, $a5, $a1
+; LA32-NEXT:    and $a6, $a6, $a4
+; LA32-NEXT:    xor $a6, $a5, $a6
+; LA32-NEXT:  .LBB60_3: # in Loop: Header=BB60_1 Depth=1
+; LA32-NEXT:    sc.w $a6, $a2, 0
+; LA32-NEXT:    beq $a6, $zero, .LBB60_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a5, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_min_i8_seq_cst:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -1255,6 +3246,38 @@ define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind {
 }
 
 define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_min_i16_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    andi $a3, $a0, 24
+; LA32-NEXT:    lu12i.w $a4, 15
+; LA32-NEXT:    ori $a4, $a4, 4095
+; LA32-NEXT:    sll.w $a4, $a4, $a0
+; LA32-NEXT:    slli.w $a1, $a1, 16
+; LA32-NEXT:    srai.w $a1, $a1, 16
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:    ori $a5, $zero, 16
+; LA32-NEXT:    sub.w $a3, $a5, $a3
+; LA32-NEXT:  .LBB61_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a5, $a2, 0
+; LA32-NEXT:    and $a7, $a5, $a4
+; LA32-NEXT:    move $a6, $a5
+; LA32-NEXT:    sll.w $a7, $a7, $a3
+; LA32-NEXT:    sra.w $a7, $a7, $a3
+; LA32-NEXT:    bge $a1, $a7, .LBB61_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB61_1 Depth=1
+; LA32-NEXT:    xor $a6, $a5, $a1
+; LA32-NEXT:    and $a6, $a6, $a4
+; LA32-NEXT:    xor $a6, $a5, $a6
+; LA32-NEXT:  .LBB61_3: # in Loop: Header=BB61_1 Depth=1
+; LA32-NEXT:    sc.w $a6, $a2, 0
+; LA32-NEXT:    beq $a6, $zero, .LBB61_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a5, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_min_i16_seq_cst:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -1289,6 +3312,21 @@ define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind {
 }
 
 define i32 @atomicrmw_min_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_min_i32_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB62_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    move $a3, $a2
+; LA32-NEXT:    bge $a1, $a2, .LBB62_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB62_1 Depth=1
+; LA32-NEXT:    move $a3, $a1
+; LA32-NEXT:  .LBB62_3: # in Loop: Header=BB62_1 Depth=1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beq $a3, $zero, .LBB62_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_min_i32_seq_cst:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammin_db.w $a2, $a1, $a0
@@ -1299,6 +3337,66 @@ define i32 @atomicrmw_min_i32_seq_cst(ptr %a, i32 %b) nounwind {
 }
 
 define i64 @atomicrmw_min_i64_seq_cst(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_min_i64_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 24 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s0, $sp, 20 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s1, $sp, 16 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s2, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    move $fp, $a0
+; LA32-NEXT:    ld.w $a5, $a0, 4
+; LA32-NEXT:    ld.w $a4, $a0, 0
+; LA32-NEXT:    move $s0, $a2
+; LA32-NEXT:    move $s1, $a1
+; LA32-NEXT:    addi.w $s2, $sp, 0
+; LA32-NEXT:    b .LBB63_2
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB63_1: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB63_2 Depth=1
+; LA32-NEXT:    st.w $a4, $sp, 0
+; LA32-NEXT:    st.w $a5, $sp, 4
+; LA32-NEXT:    ori $a4, $zero, 5
+; LA32-NEXT:    ori $a5, $zero, 5
+; LA32-NEXT:    move $a0, $fp
+; LA32-NEXT:    move $a1, $s2
+; LA32-NEXT:    bl __atomic_compare_exchange_8
+; LA32-NEXT:    ld.w $a5, $sp, 4
+; LA32-NEXT:    ld.w $a4, $sp, 0
+; LA32-NEXT:    bne $a0, $zero, .LBB63_7
+; LA32-NEXT:  .LBB63_2: # %atomicrmw.start
+; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    beq $a5, $s0, .LBB63_4
+; LA32-NEXT:  # %bb.3: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB63_2 Depth=1
+; LA32-NEXT:    slt $a0, $s0, $a5
+; LA32-NEXT:    b .LBB63_5
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB63_4: # in Loop: Header=BB63_2 Depth=1
+; LA32-NEXT:    sltu $a0, $s1, $a4
+; LA32-NEXT:  .LBB63_5: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB63_2 Depth=1
+; LA32-NEXT:    xori $a0, $a0, 1
+; LA32-NEXT:    move $a2, $a4
+; LA32-NEXT:    move $a3, $a5
+; LA32-NEXT:    bne $a0, $zero, .LBB63_1
+; LA32-NEXT:  # %bb.6: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB63_2 Depth=1
+; LA32-NEXT:    move $a2, $s1
+; LA32-NEXT:    move $a3, $s0
+; LA32-NEXT:    b .LBB63_1
+; LA32-NEXT:  .LBB63_7: # %atomicrmw.end
+; LA32-NEXT:    move $a0, $a4
+; LA32-NEXT:    move $a1, $a5
+; LA32-NEXT:    ld.w $s2, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s1, $sp, 16 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_min_i64_seq_cst:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammin_db.d $a2, $a1, $a0
@@ -1309,6 +3407,31 @@ define i64 @atomicrmw_min_i64_seq_cst(ptr %a, i64 %b) nounwind {
 }
 
 define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_umax_i8_monotonic:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    ori $a3, $zero, 255
+; LA32-NEXT:    sll.w $a3, $a3, $a0
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:  .LBB64_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a2, 0
+; LA32-NEXT:    and $a6, $a4, $a3
+; LA32-NEXT:    move $a5, $a4
+; LA32-NEXT:    bgeu $a6, $a1, .LBB64_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB64_1 Depth=1
+; LA32-NEXT:    xor $a5, $a4, $a1
+; LA32-NEXT:    and $a5, $a5, $a3
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:  .LBB64_3: # in Loop: Header=BB64_1 Depth=1
+; LA32-NEXT:    sc.w $a5, $a2, 0
+; LA32-NEXT:    beq $a5, $zero, .LBB64_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a4, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umax_i8_monotonic:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -1337,6 +3460,32 @@ define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
 }
 
 define i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_umax_i16_monotonic:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    lu12i.w $a3, 15
+; LA32-NEXT:    ori $a3, $a3, 4095
+; LA32-NEXT:    sll.w $a4, $a3, $a0
+; LA32-NEXT:    and $a1, $a1, $a3
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:  .LBB65_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a2, 0
+; LA32-NEXT:    and $a6, $a3, $a4
+; LA32-NEXT:    move $a5, $a3
+; LA32-NEXT:    bgeu $a6, $a1, .LBB65_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB65_1 Depth=1
+; LA32-NEXT:    xor $a5, $a3, $a1
+; LA32-NEXT:    and $a5, $a5, $a4
+; LA32-NEXT:    xor $a5, $a3, $a5
+; LA32-NEXT:  .LBB65_3: # in Loop: Header=BB65_1 Depth=1
+; LA32-NEXT:    sc.w $a5, $a2, 0
+; LA32-NEXT:    beq $a5, $zero, .LBB65_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a3, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umax_i16_monotonic:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -1366,6 +3515,21 @@ define i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
 }
 
 define i32 @atomicrmw_umax_i32_monotonic(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_umax_i32_monotonic:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB66_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    move $a3, $a2
+; LA32-NEXT:    bgeu $a2, $a1, .LBB66_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB66_1 Depth=1
+; LA32-NEXT:    move $a3, $a1
+; LA32-NEXT:  .LBB66_3: # in Loop: Header=BB66_1 Depth=1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beq $a3, $zero, .LBB66_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umax_i32_monotonic:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammax.wu $a2, $a1, $a0
@@ -1376,6 +3540,65 @@ define i32 @atomicrmw_umax_i32_monotonic(ptr %a, i32 %b) nounwind {
 }
 
 define i64 @atomicrmw_umax_i64_monotonic(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_umax_i64_monotonic:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 24 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s0, $sp, 20 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s1, $sp, 16 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s2, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    move $fp, $a0
+; LA32-NEXT:    ld.w $a5, $a0, 4
+; LA32-NEXT:    ld.w $a4, $a0, 0
+; LA32-NEXT:    move $s0, $a2
+; LA32-NEXT:    move $s1, $a1
+; LA32-NEXT:    addi.w $s2, $sp, 0
+; LA32-NEXT:    b .LBB67_2
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB67_1: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB67_2 Depth=1
+; LA32-NEXT:    st.w $a4, $sp, 0
+; LA32-NEXT:    st.w $a5, $sp, 4
+; LA32-NEXT:    move $a0, $fp
+; LA32-NEXT:    move $a1, $s2
+; LA32-NEXT:    move $a4, $zero
+; LA32-NEXT:    move $a5, $zero
+; LA32-NEXT:    bl __atomic_compare_exchange_8
+; LA32-NEXT:    ld.w $a5, $sp, 4
+; LA32-NEXT:    ld.w $a4, $sp, 0
+; LA32-NEXT:    bne $a0, $zero, .LBB67_7
+; LA32-NEXT:  .LBB67_2: # %atomicrmw.start
+; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    beq $a5, $s0, .LBB67_4
+; LA32-NEXT:  # %bb.3: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB67_2 Depth=1
+; LA32-NEXT:    sltu $a0, $s0, $a5
+; LA32-NEXT:    b .LBB67_5
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB67_4: # in Loop: Header=BB67_2 Depth=1
+; LA32-NEXT:    sltu $a0, $s1, $a4
+; LA32-NEXT:  .LBB67_5: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB67_2 Depth=1
+; LA32-NEXT:    move $a2, $a4
+; LA32-NEXT:    move $a3, $a5
+; LA32-NEXT:    bne $a0, $zero, .LBB67_1
+; LA32-NEXT:  # %bb.6: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB67_2 Depth=1
+; LA32-NEXT:    move $a2, $s1
+; LA32-NEXT:    move $a3, $s0
+; LA32-NEXT:    b .LBB67_1
+; LA32-NEXT:  .LBB67_7: # %atomicrmw.end
+; LA32-NEXT:    move $a0, $a4
+; LA32-NEXT:    move $a1, $a5
+; LA32-NEXT:    ld.w $s2, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s1, $sp, 16 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umax_i64_monotonic:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammax.du $a2, $a1, $a0
@@ -1386,6 +3609,31 @@ define i64 @atomicrmw_umax_i64_monotonic(ptr %a, i64 %b) nounwind {
 }
 
 define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_umin_i8_monotonic:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    ori $a3, $zero, 255
+; LA32-NEXT:    sll.w $a3, $a3, $a0
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:  .LBB68_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a4, $a2, 0
+; LA32-NEXT:    and $a6, $a4, $a3
+; LA32-NEXT:    move $a5, $a4
+; LA32-NEXT:    bgeu $a1, $a6, .LBB68_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB68_1 Depth=1
+; LA32-NEXT:    xor $a5, $a4, $a1
+; LA32-NEXT:    and $a5, $a5, $a3
+; LA32-NEXT:    xor $a5, $a4, $a5
+; LA32-NEXT:  .LBB68_3: # in Loop: Header=BB68_1 Depth=1
+; LA32-NEXT:    sc.w $a5, $a2, 0
+; LA32-NEXT:    beq $a5, $zero, .LBB68_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a4, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umin_i8_monotonic:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -1414,6 +3662,32 @@ define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
 }
 
 define i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_umin_i16_monotonic:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    lu12i.w $a3, 15
+; LA32-NEXT:    ori $a3, $a3, 4095
+; LA32-NEXT:    sll.w $a4, $a3, $a0
+; LA32-NEXT:    and $a1, $a1, $a3
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:  .LBB69_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a3, $a2, 0
+; LA32-NEXT:    and $a6, $a3, $a4
+; LA32-NEXT:    move $a5, $a3
+; LA32-NEXT:    bgeu $a1, $a6, .LBB69_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB69_1 Depth=1
+; LA32-NEXT:    xor $a5, $a3, $a1
+; LA32-NEXT:    and $a5, $a5, $a4
+; LA32-NEXT:    xor $a5, $a3, $a5
+; LA32-NEXT:  .LBB69_3: # in Loop: Header=BB69_1 Depth=1
+; LA32-NEXT:    sc.w $a5, $a2, 0
+; LA32-NEXT:    beq $a5, $zero, .LBB69_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a3, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umin_i16_monotonic:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -1443,6 +3717,21 @@ define i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
 }
 
 define i32 @atomicrmw_umin_i32_monotonic(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_umin_i32_monotonic:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB70_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    move $a3, $a2
+; LA32-NEXT:    bgeu $a1, $a2, .LBB70_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB70_1 Depth=1
+; LA32-NEXT:    move $a3, $a1
+; LA32-NEXT:  .LBB70_3: # in Loop: Header=BB70_1 Depth=1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beq $a3, $zero, .LBB70_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umin_i32_monotonic:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammin.wu $a2, $a1, $a0
@@ -1453,6 +3742,66 @@ define i32 @atomicrmw_umin_i32_monotonic(ptr %a, i32 %b) nounwind {
 }
 
 define i64 @atomicrmw_umin_i64_monotonic(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_umin_i64_monotonic:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 24 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s0, $sp, 20 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s1, $sp, 16 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s2, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    move $fp, $a0
+; LA32-NEXT:    ld.w $a5, $a0, 4
+; LA32-NEXT:    ld.w $a4, $a0, 0
+; LA32-NEXT:    move $s0, $a2
+; LA32-NEXT:    move $s1, $a1
+; LA32-NEXT:    addi.w $s2, $sp, 0
+; LA32-NEXT:    b .LBB71_2
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB71_1: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB71_2 Depth=1
+; LA32-NEXT:    st.w $a4, $sp, 0
+; LA32-NEXT:    st.w $a5, $sp, 4
+; LA32-NEXT:    move $a0, $fp
+; LA32-NEXT:    move $a1, $s2
+; LA32-NEXT:    move $a4, $zero
+; LA32-NEXT:    move $a5, $zero
+; LA32-NEXT:    bl __atomic_compare_exchange_8
+; LA32-NEXT:    ld.w $a5, $sp, 4
+; LA32-NEXT:    ld.w $a4, $sp, 0
+; LA32-NEXT:    bne $a0, $zero, .LBB71_7
+; LA32-NEXT:  .LBB71_2: # %atomicrmw.start
+; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    beq $a5, $s0, .LBB71_4
+; LA32-NEXT:  # %bb.3: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB71_2 Depth=1
+; LA32-NEXT:    sltu $a0, $s0, $a5
+; LA32-NEXT:    b .LBB71_5
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB71_4: # in Loop: Header=BB71_2 Depth=1
+; LA32-NEXT:    sltu $a0, $s1, $a4
+; LA32-NEXT:  .LBB71_5: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB71_2 Depth=1
+; LA32-NEXT:    xori $a0, $a0, 1
+; LA32-NEXT:    move $a2, $a4
+; LA32-NEXT:    move $a3, $a5
+; LA32-NEXT:    bne $a0, $zero, .LBB71_1
+; LA32-NEXT:  # %bb.6: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB71_2 Depth=1
+; LA32-NEXT:    move $a2, $s1
+; LA32-NEXT:    move $a3, $s0
+; LA32-NEXT:    b .LBB71_1
+; LA32-NEXT:  .LBB71_7: # %atomicrmw.end
+; LA32-NEXT:    move $a0, $a4
+; LA32-NEXT:    move $a1, $a5
+; LA32-NEXT:    ld.w $s2, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s1, $sp, 16 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_umin_i64_monotonic:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammin.du $a2, $a1, $a0
@@ -1463,6 +3812,36 @@ define i64 @atomicrmw_umin_i64_monotonic(ptr %a, i64 %b) nounwind {
 }
 
 define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_max_i8_monotonic:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    andi $a3, $a0, 24
+; LA32-NEXT:    ori $a4, $zero, 255
+; LA32-NEXT:    sll.w $a4, $a4, $a0
+; LA32-NEXT:    slli.w $a1, $a1, 24
+; LA32-NEXT:    srai.w $a1, $a1, 24
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:    xori $a3, $a3, 24
+; LA32-NEXT:  .LBB72_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a5, $a2, 0
+; LA32-NEXT:    and $a7, $a5, $a4
+; LA32-NEXT:    move $a6, $a5
+; LA32-NEXT:    sll.w $a7, $a7, $a3
+; LA32-NEXT:    sra.w $a7, $a7, $a3
+; LA32-NEXT:    bge $a7, $a1, .LBB72_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB72_1 Depth=1
+; LA32-NEXT:    xor $a6, $a5, $a1
+; LA32-NEXT:    and $a6, $a6, $a4
+; LA32-NEXT:    xor $a6, $a5, $a6
+; LA32-NEXT:  .LBB72_3: # in Loop: Header=BB72_1 Depth=1
+; LA32-NEXT:    sc.w $a6, $a2, 0
+; LA32-NEXT:    beq $a6, $zero, .LBB72_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a5, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_max_i8_monotonic:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -1495,6 +3874,38 @@ define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
 }
 
 define i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_max_i16_monotonic:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    andi $a3, $a0, 24
+; LA32-NEXT:    lu12i.w $a4, 15
+; LA32-NEXT:    ori $a4, $a4, 4095
+; LA32-NEXT:    sll.w $a4, $a4, $a0
+; LA32-NEXT:    slli.w $a1, $a1, 16
+; LA32-NEXT:    srai.w $a1, $a1, 16
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:    ori $a5, $zero, 16
+; LA32-NEXT:    sub.w $a3, $a5, $a3
+; LA32-NEXT:  .LBB73_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a5, $a2, 0
+; LA32-NEXT:    and $a7, $a5, $a4
+; LA32-NEXT:    move $a6, $a5
+; LA32-NEXT:    sll.w $a7, $a7, $a3
+; LA32-NEXT:    sra.w $a7, $a7, $a3
+; LA32-NEXT:    bge $a7, $a1, .LBB73_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB73_1 Depth=1
+; LA32-NEXT:    xor $a6, $a5, $a1
+; LA32-NEXT:    and $a6, $a6, $a4
+; LA32-NEXT:    xor $a6, $a5, $a6
+; LA32-NEXT:  .LBB73_3: # in Loop: Header=BB73_1 Depth=1
+; LA32-NEXT:    sc.w $a6, $a2, 0
+; LA32-NEXT:    beq $a6, $zero, .LBB73_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a5, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_max_i16_monotonic:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -1529,6 +3940,21 @@ define i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
 }
 
 define i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_max_i32_monotonic:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB74_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    move $a3, $a2
+; LA32-NEXT:    bge $a2, $a1, .LBB74_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB74_1 Depth=1
+; LA32-NEXT:    move $a3, $a1
+; LA32-NEXT:  .LBB74_3: # in Loop: Header=BB74_1 Depth=1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beq $a3, $zero, .LBB74_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_max_i32_monotonic:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammax.w $a2, $a1, $a0
@@ -1539,6 +3965,65 @@ define i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind {
 }
 
 define i64 @atomicrmw_max_i64_monotonic(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_max_i64_monotonic:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 24 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s0, $sp, 20 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s1, $sp, 16 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s2, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    move $fp, $a0
+; LA32-NEXT:    ld.w $a5, $a0, 4
+; LA32-NEXT:    ld.w $a4, $a0, 0
+; LA32-NEXT:    move $s0, $a2
+; LA32-NEXT:    move $s1, $a1
+; LA32-NEXT:    addi.w $s2, $sp, 0
+; LA32-NEXT:    b .LBB75_2
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB75_1: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB75_2 Depth=1
+; LA32-NEXT:    st.w $a4, $sp, 0
+; LA32-NEXT:    st.w $a5, $sp, 4
+; LA32-NEXT:    move $a0, $fp
+; LA32-NEXT:    move $a1, $s2
+; LA32-NEXT:    move $a4, $zero
+; LA32-NEXT:    move $a5, $zero
+; LA32-NEXT:    bl __atomic_compare_exchange_8
+; LA32-NEXT:    ld.w $a5, $sp, 4
+; LA32-NEXT:    ld.w $a4, $sp, 0
+; LA32-NEXT:    bne $a0, $zero, .LBB75_7
+; LA32-NEXT:  .LBB75_2: # %atomicrmw.start
+; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    beq $a5, $s0, .LBB75_4
+; LA32-NEXT:  # %bb.3: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB75_2 Depth=1
+; LA32-NEXT:    slt $a0, $s0, $a5
+; LA32-NEXT:    b .LBB75_5
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB75_4: # in Loop: Header=BB75_2 Depth=1
+; LA32-NEXT:    sltu $a0, $s1, $a4
+; LA32-NEXT:  .LBB75_5: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB75_2 Depth=1
+; LA32-NEXT:    move $a2, $a4
+; LA32-NEXT:    move $a3, $a5
+; LA32-NEXT:    bne $a0, $zero, .LBB75_1
+; LA32-NEXT:  # %bb.6: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB75_2 Depth=1
+; LA32-NEXT:    move $a2, $s1
+; LA32-NEXT:    move $a3, $s0
+; LA32-NEXT:    b .LBB75_1
+; LA32-NEXT:  .LBB75_7: # %atomicrmw.end
+; LA32-NEXT:    move $a0, $a4
+; LA32-NEXT:    move $a1, $a5
+; LA32-NEXT:    ld.w $s2, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s1, $sp, 16 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_max_i64_monotonic:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammax.d $a2, $a1, $a0
@@ -1549,6 +4034,36 @@ define i64 @atomicrmw_max_i64_monotonic(ptr %a, i64 %b) nounwind {
 }
 
 define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
+; LA32-LABEL: atomicrmw_min_i8_monotonic:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    andi $a3, $a0, 24
+; LA32-NEXT:    ori $a4, $zero, 255
+; LA32-NEXT:    sll.w $a4, $a4, $a0
+; LA32-NEXT:    slli.w $a1, $a1, 24
+; LA32-NEXT:    srai.w $a1, $a1, 24
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:    xori $a3, $a3, 24
+; LA32-NEXT:  .LBB76_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a5, $a2, 0
+; LA32-NEXT:    and $a7, $a5, $a4
+; LA32-NEXT:    move $a6, $a5
+; LA32-NEXT:    sll.w $a7, $a7, $a3
+; LA32-NEXT:    sra.w $a7, $a7, $a3
+; LA32-NEXT:    bge $a1, $a7, .LBB76_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB76_1 Depth=1
+; LA32-NEXT:    xor $a6, $a5, $a1
+; LA32-NEXT:    and $a6, $a6, $a4
+; LA32-NEXT:    xor $a6, $a5, $a6
+; LA32-NEXT:  .LBB76_3: # in Loop: Header=BB76_1 Depth=1
+; LA32-NEXT:    sc.w $a6, $a2, 0
+; LA32-NEXT:    beq $a6, $zero, .LBB76_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a5, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_min_i8_monotonic:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -1581,6 +4096,38 @@ define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
 }
 
 define i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
+; LA32-LABEL: atomicrmw_min_i16_monotonic:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $zero, -4
+; LA32-NEXT:    and $a2, $a0, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    andi $a3, $a0, 24
+; LA32-NEXT:    lu12i.w $a4, 15
+; LA32-NEXT:    ori $a4, $a4, 4095
+; LA32-NEXT:    sll.w $a4, $a4, $a0
+; LA32-NEXT:    slli.w $a1, $a1, 16
+; LA32-NEXT:    srai.w $a1, $a1, 16
+; LA32-NEXT:    sll.w $a1, $a1, $a0
+; LA32-NEXT:    ori $a5, $zero, 16
+; LA32-NEXT:    sub.w $a3, $a5, $a3
+; LA32-NEXT:  .LBB77_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a5, $a2, 0
+; LA32-NEXT:    and $a7, $a5, $a4
+; LA32-NEXT:    move $a6, $a5
+; LA32-NEXT:    sll.w $a7, $a7, $a3
+; LA32-NEXT:    sra.w $a7, $a7, $a3
+; LA32-NEXT:    bge $a1, $a7, .LBB77_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB77_1 Depth=1
+; LA32-NEXT:    xor $a6, $a5, $a1
+; LA32-NEXT:    and $a6, $a6, $a4
+; LA32-NEXT:    xor $a6, $a5, $a6
+; LA32-NEXT:  .LBB77_3: # in Loop: Header=BB77_1 Depth=1
+; LA32-NEXT:    sc.w $a6, $a2, 0
+; LA32-NEXT:    beq $a6, $zero, .LBB77_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    srl.w $a0, $a5, $a0
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_min_i16_monotonic:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    slli.d $a2, $a0, 3
@@ -1615,6 +4162,21 @@ define i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
 }
 
 define i32 @atomicrmw_min_i32_monotonic(ptr %a, i32 %b) nounwind {
+; LA32-LABEL: atomicrmw_min_i32_monotonic:
+; LA32:       # %bb.0:
+; LA32-NEXT:  .LBB78_1: # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ll.w $a2, $a0, 0
+; LA32-NEXT:    move $a3, $a2
+; LA32-NEXT:    bge $a1, $a2, .LBB78_3
+; LA32-NEXT:  # %bb.2: # in Loop: Header=BB78_1 Depth=1
+; LA32-NEXT:    move $a3, $a1
+; LA32-NEXT:  .LBB78_3: # in Loop: Header=BB78_1 Depth=1
+; LA32-NEXT:    sc.w $a3, $a0, 0
+; LA32-NEXT:    beq $a3, $zero, .LBB78_1
+; LA32-NEXT:  # %bb.4:
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_min_i32_monotonic:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammin.w $a2, $a1, $a0
@@ -1625,6 +4187,66 @@ define i32 @atomicrmw_min_i32_monotonic(ptr %a, i32 %b) nounwind {
 }
 
 define i64 @atomicrmw_min_i64_monotonic(ptr %a, i64 %b) nounwind {
+; LA32-LABEL: atomicrmw_min_i64_monotonic:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 24 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s0, $sp, 20 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s1, $sp, 16 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s2, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    move $fp, $a0
+; LA32-NEXT:    ld.w $a5, $a0, 4
+; LA32-NEXT:    ld.w $a4, $a0, 0
+; LA32-NEXT:    move $s0, $a2
+; LA32-NEXT:    move $s1, $a1
+; LA32-NEXT:    addi.w $s2, $sp, 0
+; LA32-NEXT:    b .LBB79_2
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB79_1: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB79_2 Depth=1
+; LA32-NEXT:    st.w $a4, $sp, 0
+; LA32-NEXT:    st.w $a5, $sp, 4
+; LA32-NEXT:    move $a0, $fp
+; LA32-NEXT:    move $a1, $s2
+; LA32-NEXT:    move $a4, $zero
+; LA32-NEXT:    move $a5, $zero
+; LA32-NEXT:    bl __atomic_compare_exchange_8
+; LA32-NEXT:    ld.w $a5, $sp, 4
+; LA32-NEXT:    ld.w $a4, $sp, 0
+; LA32-NEXT:    bne $a0, $zero, .LBB79_7
+; LA32-NEXT:  .LBB79_2: # %atomicrmw.start
+; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    beq $a5, $s0, .LBB79_4
+; LA32-NEXT:  # %bb.3: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB79_2 Depth=1
+; LA32-NEXT:    slt $a0, $s0, $a5
+; LA32-NEXT:    b .LBB79_5
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB79_4: # in Loop: Header=BB79_2 Depth=1
+; LA32-NEXT:    sltu $a0, $s1, $a4
+; LA32-NEXT:  .LBB79_5: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB79_2 Depth=1
+; LA32-NEXT:    xori $a0, $a0, 1
+; LA32-NEXT:    move $a2, $a4
+; LA32-NEXT:    move $a3, $a5
+; LA32-NEXT:    bne $a0, $zero, .LBB79_1
+; LA32-NEXT:  # %bb.6: # %atomicrmw.start
+; LA32-NEXT:    # in Loop: Header=BB79_2 Depth=1
+; LA32-NEXT:    move $a2, $s1
+; LA32-NEXT:    move $a3, $s0
+; LA32-NEXT:    b .LBB79_1
+; LA32-NEXT:  .LBB79_7: # %atomicrmw.end
+; LA32-NEXT:    move $a0, $a4
+; LA32-NEXT:    move $a1, $a5
+; LA32-NEXT:    ld.w $s2, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s1, $sp, 16 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
+; LA32-NEXT:    ret
+;
 ; LA64-LABEL: atomicrmw_min_i64_monotonic:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ammin.d $a2, $a1, $a0


