[llvm] [RISCV] 'Zalrsc' may permit non-base instructions (PR #165042)
    via llvm-commits 
    llvm-commits at lists.llvm.org
       
    Fri Oct 24 18:13:08 PDT 2025
    
    
  
https://github.com/slachowsky updated https://github.com/llvm/llvm-project/pull/165042
>From 07af5e49e554d5f359a939632616e23d671d456d Mon Sep 17 00:00:00 2001
From: Stephan Lachowsky <slachowsky at apple.com>
Date: Fri, 24 Oct 2025 13:15:11 -0700
Subject: [PATCH 1/4] [RISCV] 'Zalrsc' may permit non-base instructions
Provide shorter atomic LR/SC sequences with non-base instructions (e.g.
'B' extension instructions) when implementations opt in to
FeaturePermissiveZalrsc.  Currently this shortens `atomicrmw
{min,max,umin,umax}` pseudo expansions.
There is no functional change for machines when this target feature is
not requested.
---
 .../RISCV/RISCVExpandAtomicPseudoInsts.cpp    |   23 +
 llvm/lib/Target/RISCV/RISCVFeatures.td        |   16 +
 llvm/test/CodeGen/RISCV/atomic-rmw-minmax.ll  | 1074 +++++++++++++++++
 llvm/test/CodeGen/RISCV/features-info.ll      |    1 +
 4 files changed, 1114 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/atomic-rmw-minmax.ll
diff --git a/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp
index 98b636e8e0e55..9bd66a43717e7 100644
--- a/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp
+++ b/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp
@@ -373,6 +373,26 @@ static void doAtomicBinOpExpansion(const RISCVInstrInfo *TII, MachineInstr &MI,
         .addReg(ScratchReg)
         .addImm(-1);
     break;
+  case AtomicRMWInst::Max:
+    BuildMI(LoopMBB, DL, TII->get(RISCV::MAX), ScratchReg)
+        .addReg(DestReg)
+        .addReg(IncrReg);
+    break;
+  case AtomicRMWInst::Min:
+    BuildMI(LoopMBB, DL, TII->get(RISCV::MIN), ScratchReg)
+        .addReg(DestReg)
+        .addReg(IncrReg);
+    break;
+  case AtomicRMWInst::UMax:
+    BuildMI(LoopMBB, DL, TII->get(RISCV::MAXU), ScratchReg)
+        .addReg(DestReg)
+        .addReg(IncrReg);
+    break;
+  case AtomicRMWInst::UMin:
+    BuildMI(LoopMBB, DL, TII->get(RISCV::MINU), ScratchReg)
+        .addReg(DestReg)
+        .addReg(IncrReg);
+    break;
   }
   BuildMI(LoopMBB, DL, TII->get(getSCForRMW(Ordering, Width, STI)), ScratchReg)
       .addReg(ScratchReg)
@@ -682,6 +702,9 @@ bool RISCVExpandAtomicPseudo::expandAtomicMinMaxOp(
     MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
     AtomicRMWInst::BinOp BinOp, bool IsMasked, int Width,
     MachineBasicBlock::iterator &NextMBBI) {
+  // Using MIN(U)/MAX(U) is preferable if permitted.
+  if (STI->hasPermissiveZalrsc() && STI->hasStdExtZbb() && !IsMasked)
+    return expandAtomicBinOp(MBB, MBBI, BinOp, IsMasked, Width, NextMBBI);
 
   MachineInstr &MI = *MBBI;
   DebugLoc DL = MI.getDebugLoc();
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index 2754d789b9899..3c1e9665d823e 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -1906,6 +1906,22 @@ def FeatureForcedAtomics : SubtargetFeature<
 def HasAtomicLdSt
     : Predicate<"Subtarget->hasStdExtZalrsc() || Subtarget->hasForcedAtomics()">;
 
+// The RISC-V Unprivileged Architecture defines _constrained_ LR/SC loops:
+//   The dynamic code executed between the LR and SC instructions can only
+//   contain instructions from the base ''I'' instruction set, excluding loads,
+//   stores, backward jumps, taken backward branches, JALR, FENCE, and SYSTEM
+//   instructions. Compressed forms of the aforementioned ''I'' instructions in
+//   the Zca and Zcb extensions are also permitted.
+// LR/SC loops that do not adhere to the above are _unconstrained_ LR/SC loops,
+// and success is implementation specific. For implementations which know that
+// non-base instructions (such as the ''B'' extension) will not violate any
+// forward progress guarantees, using these instructions to reduce the LR/SC
+// sequence length is desirable.
+def FeaturePermissiveZalrsc
+    : SubtargetFeature<
+          "permissive-zalrsc", "HasPermissiveZalrsc", "true",
+          "Implementation permits non-base instructions between LR/SC pairs">;
+
 def FeatureTaggedGlobals : SubtargetFeature<"tagged-globals",
     "AllowTaggedGlobals",
     "true", "Use an instruction sequence for taking the address of a global "
diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw-minmax.ll b/llvm/test/CodeGen/RISCV/atomic-rmw-minmax.ll
new file mode 100644
index 0000000000000..9ce987c1add50
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/atomic-rmw-minmax.ll
@@ -0,0 +1,1074 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+zalrsc -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=RV32I-ZALRSC %s
+; RUN: llc -mtriple=riscv32 -mattr=+b,+zalrsc,+permissive-zalrsc -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=RV32IB-ZALRSC %s
+; RUN: llc -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=RV32IA %s
+;
+; RUN: llc -mtriple=riscv64 -mattr=+zalrsc -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=RV64I-ZALRSC %s
+; RUN: llc -mtriple=riscv64 -mattr=+b,+zalrsc,+permissive-zalrsc -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=RV64IB-ZALRSC %s
+; RUN: llc -mtriple=riscv64 -mattr=+a -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=RV64IA %s
+
+define i32 @atomicrmw_max_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; RV32I-ZALRSC-LABEL: atomicrmw_max_i32_seq_cst:
+; RV32I-ZALRSC:       # %bb.0:
+; RV32I-ZALRSC-NEXT:  .LBB0_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT:    lr.w.aqrl a2, (a0)
+; RV32I-ZALRSC-NEXT:    mv a3, a2
+; RV32I-ZALRSC-NEXT:    bge a3, a1, .LBB0_3
+; RV32I-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB0_1 Depth=1
+; RV32I-ZALRSC-NEXT:    mv a3, a1
+; RV32I-ZALRSC-NEXT:  .LBB0_3: # in Loop: Header=BB0_1 Depth=1
+; RV32I-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT:    bnez a3, .LBB0_1
+; RV32I-ZALRSC-NEXT:  # %bb.4:
+; RV32I-ZALRSC-NEXT:    mv a0, a2
+; RV32I-ZALRSC-NEXT:    ret
+;
+; RV32IB-ZALRSC-LABEL: atomicrmw_max_i32_seq_cst:
+; RV32IB-ZALRSC:       # %bb.0:
+; RV32IB-ZALRSC-NEXT:  .LBB0_1: # =>This Inner Loop Header: Depth=1
+; RV32IB-ZALRSC-NEXT:    lr.w.aqrl a2, (a0)
+; RV32IB-ZALRSC-NEXT:    max a3, a2, a1
+; RV32IB-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
+; RV32IB-ZALRSC-NEXT:    bnez a3, .LBB0_1
+; RV32IB-ZALRSC-NEXT:  # %bb.2:
+; RV32IB-ZALRSC-NEXT:    mv a0, a2
+; RV32IB-ZALRSC-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_max_i32_seq_cst:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    amomax.w.aqrl a0, a1, (a0)
+; RV32IA-NEXT:    ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_max_i32_seq_cst:
+; RV64I-ZALRSC:       # %bb.0:
+; RV64I-ZALRSC-NEXT:    sext.w a2, a1
+; RV64I-ZALRSC-NEXT:  .LBB0_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT:    lr.w.aqrl a1, (a0)
+; RV64I-ZALRSC-NEXT:    mv a3, a1
+; RV64I-ZALRSC-NEXT:    bge a3, a2, .LBB0_3
+; RV64I-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB0_1 Depth=1
+; RV64I-ZALRSC-NEXT:    mv a3, a2
+; RV64I-ZALRSC-NEXT:  .LBB0_3: # in Loop: Header=BB0_1 Depth=1
+; RV64I-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT:    bnez a3, .LBB0_1
+; RV64I-ZALRSC-NEXT:  # %bb.4:
+; RV64I-ZALRSC-NEXT:    mv a0, a1
+; RV64I-ZALRSC-NEXT:    ret
+;
+; RV64IB-ZALRSC-LABEL: atomicrmw_max_i32_seq_cst:
+; RV64IB-ZALRSC:       # %bb.0:
+; RV64IB-ZALRSC-NEXT:    sext.w a2, a1
+; RV64IB-ZALRSC-NEXT:  .LBB0_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-NEXT:    lr.w.aqrl a1, (a0)
+; RV64IB-ZALRSC-NEXT:    max a3, a1, a2
+; RV64IB-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
+; RV64IB-ZALRSC-NEXT:    bnez a3, .LBB0_1
+; RV64IB-ZALRSC-NEXT:  # %bb.2:
+; RV64IB-ZALRSC-NEXT:    mv a0, a1
+; RV64IB-ZALRSC-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_max_i32_seq_cst:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    amomax.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT:    ret
+  %1 = atomicrmw max ptr %a, i32 %b seq_cst
+  ret i32 %1
+}
+
+define i32 @atomicrmw_min_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; RV32I-ZALRSC-LABEL: atomicrmw_min_i32_seq_cst:
+; RV32I-ZALRSC:       # %bb.0:
+; RV32I-ZALRSC-NEXT:  .LBB1_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT:    lr.w.aqrl a2, (a0)
+; RV32I-ZALRSC-NEXT:    mv a3, a2
+; RV32I-ZALRSC-NEXT:    bge a1, a3, .LBB1_3
+; RV32I-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB1_1 Depth=1
+; RV32I-ZALRSC-NEXT:    mv a3, a1
+; RV32I-ZALRSC-NEXT:  .LBB1_3: # in Loop: Header=BB1_1 Depth=1
+; RV32I-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT:    bnez a3, .LBB1_1
+; RV32I-ZALRSC-NEXT:  # %bb.4:
+; RV32I-ZALRSC-NEXT:    mv a0, a2
+; RV32I-ZALRSC-NEXT:    ret
+;
+; RV32IB-ZALRSC-LABEL: atomicrmw_min_i32_seq_cst:
+; RV32IB-ZALRSC:       # %bb.0:
+; RV32IB-ZALRSC-NEXT:  .LBB1_1: # =>This Inner Loop Header: Depth=1
+; RV32IB-ZALRSC-NEXT:    lr.w.aqrl a2, (a0)
+; RV32IB-ZALRSC-NEXT:    min a3, a2, a1
+; RV32IB-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
+; RV32IB-ZALRSC-NEXT:    bnez a3, .LBB1_1
+; RV32IB-ZALRSC-NEXT:  # %bb.2:
+; RV32IB-ZALRSC-NEXT:    mv a0, a2
+; RV32IB-ZALRSC-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_min_i32_seq_cst:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    amomin.w.aqrl a0, a1, (a0)
+; RV32IA-NEXT:    ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_min_i32_seq_cst:
+; RV64I-ZALRSC:       # %bb.0:
+; RV64I-ZALRSC-NEXT:    sext.w a2, a1
+; RV64I-ZALRSC-NEXT:  .LBB1_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT:    lr.w.aqrl a1, (a0)
+; RV64I-ZALRSC-NEXT:    mv a3, a1
+; RV64I-ZALRSC-NEXT:    bge a2, a3, .LBB1_3
+; RV64I-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB1_1 Depth=1
+; RV64I-ZALRSC-NEXT:    mv a3, a2
+; RV64I-ZALRSC-NEXT:  .LBB1_3: # in Loop: Header=BB1_1 Depth=1
+; RV64I-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT:    bnez a3, .LBB1_1
+; RV64I-ZALRSC-NEXT:  # %bb.4:
+; RV64I-ZALRSC-NEXT:    mv a0, a1
+; RV64I-ZALRSC-NEXT:    ret
+;
+; RV64IB-ZALRSC-LABEL: atomicrmw_min_i32_seq_cst:
+; RV64IB-ZALRSC:       # %bb.0:
+; RV64IB-ZALRSC-NEXT:    sext.w a2, a1
+; RV64IB-ZALRSC-NEXT:  .LBB1_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-NEXT:    lr.w.aqrl a1, (a0)
+; RV64IB-ZALRSC-NEXT:    min a3, a1, a2
+; RV64IB-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
+; RV64IB-ZALRSC-NEXT:    bnez a3, .LBB1_1
+; RV64IB-ZALRSC-NEXT:  # %bb.2:
+; RV64IB-ZALRSC-NEXT:    mv a0, a1
+; RV64IB-ZALRSC-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_min_i32_seq_cst:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    amomin.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT:    ret
+  %1 = atomicrmw min ptr %a, i32 %b seq_cst
+  ret i32 %1
+}
+
+define i32 @atomicrmw_umax_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; RV32I-ZALRSC-LABEL: atomicrmw_umax_i32_seq_cst:
+; RV32I-ZALRSC:       # %bb.0:
+; RV32I-ZALRSC-NEXT:  .LBB2_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT:    lr.w.aqrl a2, (a0)
+; RV32I-ZALRSC-NEXT:    mv a3, a2
+; RV32I-ZALRSC-NEXT:    bgeu a3, a1, .LBB2_3
+; RV32I-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB2_1 Depth=1
+; RV32I-ZALRSC-NEXT:    mv a3, a1
+; RV32I-ZALRSC-NEXT:  .LBB2_3: # in Loop: Header=BB2_1 Depth=1
+; RV32I-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT:    bnez a3, .LBB2_1
+; RV32I-ZALRSC-NEXT:  # %bb.4:
+; RV32I-ZALRSC-NEXT:    mv a0, a2
+; RV32I-ZALRSC-NEXT:    ret
+;
+; RV32IB-ZALRSC-LABEL: atomicrmw_umax_i32_seq_cst:
+; RV32IB-ZALRSC:       # %bb.0:
+; RV32IB-ZALRSC-NEXT:  .LBB2_1: # =>This Inner Loop Header: Depth=1
+; RV32IB-ZALRSC-NEXT:    lr.w.aqrl a2, (a0)
+; RV32IB-ZALRSC-NEXT:    maxu a3, a2, a1
+; RV32IB-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
+; RV32IB-ZALRSC-NEXT:    bnez a3, .LBB2_1
+; RV32IB-ZALRSC-NEXT:  # %bb.2:
+; RV32IB-ZALRSC-NEXT:    mv a0, a2
+; RV32IB-ZALRSC-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_umax_i32_seq_cst:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    amomaxu.w.aqrl a0, a1, (a0)
+; RV32IA-NEXT:    ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_umax_i32_seq_cst:
+; RV64I-ZALRSC:       # %bb.0:
+; RV64I-ZALRSC-NEXT:    sext.w a2, a1
+; RV64I-ZALRSC-NEXT:  .LBB2_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT:    lr.w.aqrl a1, (a0)
+; RV64I-ZALRSC-NEXT:    mv a3, a1
+; RV64I-ZALRSC-NEXT:    bgeu a3, a2, .LBB2_3
+; RV64I-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB2_1 Depth=1
+; RV64I-ZALRSC-NEXT:    mv a3, a2
+; RV64I-ZALRSC-NEXT:  .LBB2_3: # in Loop: Header=BB2_1 Depth=1
+; RV64I-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT:    bnez a3, .LBB2_1
+; RV64I-ZALRSC-NEXT:  # %bb.4:
+; RV64I-ZALRSC-NEXT:    mv a0, a1
+; RV64I-ZALRSC-NEXT:    ret
+;
+; RV64IB-ZALRSC-LABEL: atomicrmw_umax_i32_seq_cst:
+; RV64IB-ZALRSC:       # %bb.0:
+; RV64IB-ZALRSC-NEXT:    sext.w a2, a1
+; RV64IB-ZALRSC-NEXT:  .LBB2_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-NEXT:    lr.w.aqrl a1, (a0)
+; RV64IB-ZALRSC-NEXT:    maxu a3, a1, a2
+; RV64IB-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
+; RV64IB-ZALRSC-NEXT:    bnez a3, .LBB2_1
+; RV64IB-ZALRSC-NEXT:  # %bb.2:
+; RV64IB-ZALRSC-NEXT:    mv a0, a1
+; RV64IB-ZALRSC-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i32_seq_cst:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    amomaxu.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT:    ret
+  %1 = atomicrmw umax ptr %a, i32 %b seq_cst
+  ret i32 %1
+}
+
+define i32 @atomicrmw_umin_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; RV32I-ZALRSC-LABEL: atomicrmw_umin_i32_seq_cst:
+; RV32I-ZALRSC:       # %bb.0:
+; RV32I-ZALRSC-NEXT:  .LBB3_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT:    lr.w.aqrl a2, (a0)
+; RV32I-ZALRSC-NEXT:    mv a3, a2
+; RV32I-ZALRSC-NEXT:    bgeu a1, a3, .LBB3_3
+; RV32I-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB3_1 Depth=1
+; RV32I-ZALRSC-NEXT:    mv a3, a1
+; RV32I-ZALRSC-NEXT:  .LBB3_3: # in Loop: Header=BB3_1 Depth=1
+; RV32I-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT:    bnez a3, .LBB3_1
+; RV32I-ZALRSC-NEXT:  # %bb.4:
+; RV32I-ZALRSC-NEXT:    mv a0, a2
+; RV32I-ZALRSC-NEXT:    ret
+;
+; RV32IB-ZALRSC-LABEL: atomicrmw_umin_i32_seq_cst:
+; RV32IB-ZALRSC:       # %bb.0:
+; RV32IB-ZALRSC-NEXT:  .LBB3_1: # =>This Inner Loop Header: Depth=1
+; RV32IB-ZALRSC-NEXT:    lr.w.aqrl a2, (a0)
+; RV32IB-ZALRSC-NEXT:    minu a3, a2, a1
+; RV32IB-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
+; RV32IB-ZALRSC-NEXT:    bnez a3, .LBB3_1
+; RV32IB-ZALRSC-NEXT:  # %bb.2:
+; RV32IB-ZALRSC-NEXT:    mv a0, a2
+; RV32IB-ZALRSC-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_umin_i32_seq_cst:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    amominu.w.aqrl a0, a1, (a0)
+; RV32IA-NEXT:    ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_umin_i32_seq_cst:
+; RV64I-ZALRSC:       # %bb.0:
+; RV64I-ZALRSC-NEXT:    sext.w a2, a1
+; RV64I-ZALRSC-NEXT:  .LBB3_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT:    lr.w.aqrl a1, (a0)
+; RV64I-ZALRSC-NEXT:    mv a3, a1
+; RV64I-ZALRSC-NEXT:    bgeu a2, a3, .LBB3_3
+; RV64I-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB3_1 Depth=1
+; RV64I-ZALRSC-NEXT:    mv a3, a2
+; RV64I-ZALRSC-NEXT:  .LBB3_3: # in Loop: Header=BB3_1 Depth=1
+; RV64I-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT:    bnez a3, .LBB3_1
+; RV64I-ZALRSC-NEXT:  # %bb.4:
+; RV64I-ZALRSC-NEXT:    mv a0, a1
+; RV64I-ZALRSC-NEXT:    ret
+;
+; RV64IB-ZALRSC-LABEL: atomicrmw_umin_i32_seq_cst:
+; RV64IB-ZALRSC:       # %bb.0:
+; RV64IB-ZALRSC-NEXT:    sext.w a2, a1
+; RV64IB-ZALRSC-NEXT:  .LBB3_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-NEXT:    lr.w.aqrl a1, (a0)
+; RV64IB-ZALRSC-NEXT:    minu a3, a1, a2
+; RV64IB-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
+; RV64IB-ZALRSC-NEXT:    bnez a3, .LBB3_1
+; RV64IB-ZALRSC-NEXT:  # %bb.2:
+; RV64IB-ZALRSC-NEXT:    mv a0, a1
+; RV64IB-ZALRSC-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i32_seq_cst:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    amominu.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT:    ret
+  %1 = atomicrmw umin ptr %a, i32 %b seq_cst
+  ret i32 %1
+}
+
+define i64 @atomicrmw_max_i64_seq_cst(ptr %a, i64 %b) nounwind {
+; RV32I-ZALRSC-LABEL: atomicrmw_max_i64_seq_cst:
+; RV32I-ZALRSC:       # %bb.0:
+; RV32I-ZALRSC-NEXT:    addi sp, sp, -32
+; RV32I-ZALRSC-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT:    mv s0, a2
+; RV32I-ZALRSC-NEXT:    mv s1, a0
+; RV32I-ZALRSC-NEXT:    lw a4, 0(a0)
+; RV32I-ZALRSC-NEXT:    lw a5, 4(a0)
+; RV32I-ZALRSC-NEXT:    mv s2, a1
+; RV32I-ZALRSC-NEXT:    j .LBB4_2
+; RV32I-ZALRSC-NEXT:  .LBB4_1: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB4_2 Depth=1
+; RV32I-ZALRSC-NEXT:    sw a4, 8(sp)
+; RV32I-ZALRSC-NEXT:    sw a5, 12(sp)
+; RV32I-ZALRSC-NEXT:    addi a1, sp, 8
+; RV32I-ZALRSC-NEXT:    li a4, 5
+; RV32I-ZALRSC-NEXT:    li a5, 5
+; RV32I-ZALRSC-NEXT:    mv a0, s1
+; RV32I-ZALRSC-NEXT:    call __atomic_compare_exchange_8
+; RV32I-ZALRSC-NEXT:    lw a4, 8(sp)
+; RV32I-ZALRSC-NEXT:    lw a5, 12(sp)
+; RV32I-ZALRSC-NEXT:    bnez a0, .LBB4_7
+; RV32I-ZALRSC-NEXT:  .LBB4_2: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT:    beq a5, s0, .LBB4_4
+; RV32I-ZALRSC-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB4_2 Depth=1
+; RV32I-ZALRSC-NEXT:    slt a0, s0, a5
+; RV32I-ZALRSC-NEXT:    j .LBB4_5
+; RV32I-ZALRSC-NEXT:  .LBB4_4: # in Loop: Header=BB4_2 Depth=1
+; RV32I-ZALRSC-NEXT:    sltu a0, s2, a4
+; RV32I-ZALRSC-NEXT:  .LBB4_5: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB4_2 Depth=1
+; RV32I-ZALRSC-NEXT:    mv a2, a4
+; RV32I-ZALRSC-NEXT:    mv a3, a5
+; RV32I-ZALRSC-NEXT:    bnez a0, .LBB4_1
+; RV32I-ZALRSC-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB4_2 Depth=1
+; RV32I-ZALRSC-NEXT:    mv a2, s2
+; RV32I-ZALRSC-NEXT:    mv a3, s0
+; RV32I-ZALRSC-NEXT:    j .LBB4_1
+; RV32I-ZALRSC-NEXT:  .LBB4_7: # %atomicrmw.end
+; RV32I-ZALRSC-NEXT:    mv a0, a4
+; RV32I-ZALRSC-NEXT:    mv a1, a5
+; RV32I-ZALRSC-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT:    addi sp, sp, 32
+; RV32I-ZALRSC-NEXT:    ret
+;
+; RV32IB-ZALRSC-LABEL: atomicrmw_max_i64_seq_cst:
+; RV32IB-ZALRSC:       # %bb.0:
+; RV32IB-ZALRSC-NEXT:    addi sp, sp, -32
+; RV32IB-ZALRSC-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IB-ZALRSC-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IB-ZALRSC-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IB-ZALRSC-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32IB-ZALRSC-NEXT:    mv s0, a2
+; RV32IB-ZALRSC-NEXT:    mv s1, a0
+; RV32IB-ZALRSC-NEXT:    lw a4, 0(a0)
+; RV32IB-ZALRSC-NEXT:    lw a5, 4(a0)
+; RV32IB-ZALRSC-NEXT:    mv s2, a1
+; RV32IB-ZALRSC-NEXT:    j .LBB4_2
+; RV32IB-ZALRSC-NEXT:  .LBB4_1: # %atomicrmw.start
+; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB4_2 Depth=1
+; RV32IB-ZALRSC-NEXT:    sw a4, 8(sp)
+; RV32IB-ZALRSC-NEXT:    sw a5, 12(sp)
+; RV32IB-ZALRSC-NEXT:    addi a1, sp, 8
+; RV32IB-ZALRSC-NEXT:    li a4, 5
+; RV32IB-ZALRSC-NEXT:    li a5, 5
+; RV32IB-ZALRSC-NEXT:    mv a0, s1
+; RV32IB-ZALRSC-NEXT:    call __atomic_compare_exchange_8
+; RV32IB-ZALRSC-NEXT:    lw a4, 8(sp)
+; RV32IB-ZALRSC-NEXT:    lw a5, 12(sp)
+; RV32IB-ZALRSC-NEXT:    bnez a0, .LBB4_7
+; RV32IB-ZALRSC-NEXT:  .LBB4_2: # %atomicrmw.start
+; RV32IB-ZALRSC-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32IB-ZALRSC-NEXT:    beq a5, s0, .LBB4_4
+; RV32IB-ZALRSC-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB4_2 Depth=1
+; RV32IB-ZALRSC-NEXT:    slt a0, s0, a5
+; RV32IB-ZALRSC-NEXT:    j .LBB4_5
+; RV32IB-ZALRSC-NEXT:  .LBB4_4: # in Loop: Header=BB4_2 Depth=1
+; RV32IB-ZALRSC-NEXT:    sltu a0, s2, a4
+; RV32IB-ZALRSC-NEXT:  .LBB4_5: # %atomicrmw.start
+; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB4_2 Depth=1
+; RV32IB-ZALRSC-NEXT:    mv a2, a4
+; RV32IB-ZALRSC-NEXT:    mv a3, a5
+; RV32IB-ZALRSC-NEXT:    bnez a0, .LBB4_1
+; RV32IB-ZALRSC-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB4_2 Depth=1
+; RV32IB-ZALRSC-NEXT:    mv a2, s2
+; RV32IB-ZALRSC-NEXT:    mv a3, s0
+; RV32IB-ZALRSC-NEXT:    j .LBB4_1
+; RV32IB-ZALRSC-NEXT:  .LBB4_7: # %atomicrmw.end
+; RV32IB-ZALRSC-NEXT:    mv a0, a4
+; RV32IB-ZALRSC-NEXT:    mv a1, a5
+; RV32IB-ZALRSC-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32IB-ZALRSC-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IB-ZALRSC-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IB-ZALRSC-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IB-ZALRSC-NEXT:    addi sp, sp, 32
+; RV32IB-ZALRSC-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_max_i64_seq_cst:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    addi sp, sp, -32
+; RV32IA-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    mv s0, a2
+; RV32IA-NEXT:    mv s1, a0
+; RV32IA-NEXT:    lw a4, 0(a0)
+; RV32IA-NEXT:    lw a5, 4(a0)
+; RV32IA-NEXT:    mv s2, a1
+; RV32IA-NEXT:    j .LBB4_2
+; RV32IA-NEXT:  .LBB4_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB4_2 Depth=1
+; RV32IA-NEXT:    sw a4, 8(sp)
+; RV32IA-NEXT:    sw a5, 12(sp)
+; RV32IA-NEXT:    addi a1, sp, 8
+; RV32IA-NEXT:    li a4, 5
+; RV32IA-NEXT:    li a5, 5
+; RV32IA-NEXT:    mv a0, s1
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
+; RV32IA-NEXT:    lw a4, 8(sp)
+; RV32IA-NEXT:    lw a5, 12(sp)
+; RV32IA-NEXT:    bnez a0, .LBB4_7
+; RV32IA-NEXT:  .LBB4_2: # %atomicrmw.start
+; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:    beq a5, s0, .LBB4_4
+; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB4_2 Depth=1
+; RV32IA-NEXT:    slt a0, s0, a5
+; RV32IA-NEXT:    j .LBB4_5
+; RV32IA-NEXT:  .LBB4_4: # in Loop: Header=BB4_2 Depth=1
+; RV32IA-NEXT:    sltu a0, s2, a4
+; RV32IA-NEXT:  .LBB4_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB4_2 Depth=1
+; RV32IA-NEXT:    mv a2, a4
+; RV32IA-NEXT:    mv a3, a5
+; RV32IA-NEXT:    bnez a0, .LBB4_1
+; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB4_2 Depth=1
+; RV32IA-NEXT:    mv a2, s2
+; RV32IA-NEXT:    mv a3, s0
+; RV32IA-NEXT:    j .LBB4_1
+; RV32IA-NEXT:  .LBB4_7: # %atomicrmw.end
+; RV32IA-NEXT:    mv a0, a4
+; RV32IA-NEXT:    mv a1, a5
+; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    addi sp, sp, 32
+; RV32IA-NEXT:    ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_max_i64_seq_cst:
+; RV64I-ZALRSC:       # %bb.0:
+; RV64I-ZALRSC-NEXT:  .LBB4_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT:    lr.d.aqrl a2, (a0)
+; RV64I-ZALRSC-NEXT:    mv a3, a2
+; RV64I-ZALRSC-NEXT:    bge a3, a1, .LBB4_3
+; RV64I-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB4_1 Depth=1
+; RV64I-ZALRSC-NEXT:    mv a3, a1
+; RV64I-ZALRSC-NEXT:  .LBB4_3: # in Loop: Header=BB4_1 Depth=1
+; RV64I-ZALRSC-NEXT:    sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT:    bnez a3, .LBB4_1
+; RV64I-ZALRSC-NEXT:  # %bb.4:
+; RV64I-ZALRSC-NEXT:    mv a0, a2
+; RV64I-ZALRSC-NEXT:    ret
+;
+; RV64IB-ZALRSC-LABEL: atomicrmw_max_i64_seq_cst:
+; RV64IB-ZALRSC:       # %bb.0:
+; RV64IB-ZALRSC-NEXT:  .LBB4_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-NEXT:    lr.d.aqrl a2, (a0)
+; RV64IB-ZALRSC-NEXT:    max a3, a2, a1
+; RV64IB-ZALRSC-NEXT:    sc.d.rl a3, a3, (a0)
+; RV64IB-ZALRSC-NEXT:    bnez a3, .LBB4_1
+; RV64IB-ZALRSC-NEXT:  # %bb.2:
+; RV64IB-ZALRSC-NEXT:    mv a0, a2
+; RV64IB-ZALRSC-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_max_i64_seq_cst:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    amomax.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT:    ret
+  %1 = atomicrmw max ptr %a, i64 %b seq_cst
+  ret i64 %1
+}
+
+define i64 @atomicrmw_min_i64_seq_cst(ptr %a, i64 %b) nounwind {
+; RV32I-ZALRSC-LABEL: atomicrmw_min_i64_seq_cst:
+; RV32I-ZALRSC:       # %bb.0:
+; RV32I-ZALRSC-NEXT:    addi sp, sp, -32
+; RV32I-ZALRSC-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT:    mv s0, a2
+; RV32I-ZALRSC-NEXT:    mv s1, a0
+; RV32I-ZALRSC-NEXT:    lw a4, 0(a0)
+; RV32I-ZALRSC-NEXT:    lw a5, 4(a0)
+; RV32I-ZALRSC-NEXT:    mv s2, a1
+; RV32I-ZALRSC-NEXT:    j .LBB5_2
+; RV32I-ZALRSC-NEXT:  .LBB5_1: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB5_2 Depth=1
+; RV32I-ZALRSC-NEXT:    sw a4, 8(sp)
+; RV32I-ZALRSC-NEXT:    sw a5, 12(sp)
+; RV32I-ZALRSC-NEXT:    addi a1, sp, 8
+; RV32I-ZALRSC-NEXT:    li a4, 5
+; RV32I-ZALRSC-NEXT:    li a5, 5
+; RV32I-ZALRSC-NEXT:    mv a0, s1
+; RV32I-ZALRSC-NEXT:    call __atomic_compare_exchange_8
+; RV32I-ZALRSC-NEXT:    lw a4, 8(sp)
+; RV32I-ZALRSC-NEXT:    lw a5, 12(sp)
+; RV32I-ZALRSC-NEXT:    bnez a0, .LBB5_7
+; RV32I-ZALRSC-NEXT:  .LBB5_2: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT:    beq a5, s0, .LBB5_4
+; RV32I-ZALRSC-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB5_2 Depth=1
+; RV32I-ZALRSC-NEXT:    slt a0, s0, a5
+; RV32I-ZALRSC-NEXT:    j .LBB5_5
+; RV32I-ZALRSC-NEXT:  .LBB5_4: # in Loop: Header=BB5_2 Depth=1
+; RV32I-ZALRSC-NEXT:    sltu a0, s2, a4
+; RV32I-ZALRSC-NEXT:  .LBB5_5: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB5_2 Depth=1
+; RV32I-ZALRSC-NEXT:    mv a2, a4
+; RV32I-ZALRSC-NEXT:    mv a3, a5
+; RV32I-ZALRSC-NEXT:    beqz a0, .LBB5_1
+; RV32I-ZALRSC-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB5_2 Depth=1
+; RV32I-ZALRSC-NEXT:    mv a2, s2
+; RV32I-ZALRSC-NEXT:    mv a3, s0
+; RV32I-ZALRSC-NEXT:    j .LBB5_1
+; RV32I-ZALRSC-NEXT:  .LBB5_7: # %atomicrmw.end
+; RV32I-ZALRSC-NEXT:    mv a0, a4
+; RV32I-ZALRSC-NEXT:    mv a1, a5
+; RV32I-ZALRSC-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT:    addi sp, sp, 32
+; RV32I-ZALRSC-NEXT:    ret
+;
+; RV32IB-ZALRSC-LABEL: atomicrmw_min_i64_seq_cst:
+; RV32IB-ZALRSC:       # %bb.0:
+; RV32IB-ZALRSC-NEXT:    addi sp, sp, -32
+; RV32IB-ZALRSC-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IB-ZALRSC-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IB-ZALRSC-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IB-ZALRSC-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32IB-ZALRSC-NEXT:    mv s0, a2
+; RV32IB-ZALRSC-NEXT:    mv s1, a0
+; RV32IB-ZALRSC-NEXT:    lw a4, 0(a0)
+; RV32IB-ZALRSC-NEXT:    lw a5, 4(a0)
+; RV32IB-ZALRSC-NEXT:    mv s2, a1
+; RV32IB-ZALRSC-NEXT:    j .LBB5_2
+; RV32IB-ZALRSC-NEXT:  .LBB5_1: # %atomicrmw.start
+; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB5_2 Depth=1
+; RV32IB-ZALRSC-NEXT:    sw a4, 8(sp)
+; RV32IB-ZALRSC-NEXT:    sw a5, 12(sp)
+; RV32IB-ZALRSC-NEXT:    addi a1, sp, 8
+; RV32IB-ZALRSC-NEXT:    li a4, 5
+; RV32IB-ZALRSC-NEXT:    li a5, 5
+; RV32IB-ZALRSC-NEXT:    mv a0, s1
+; RV32IB-ZALRSC-NEXT:    call __atomic_compare_exchange_8
+; RV32IB-ZALRSC-NEXT:    lw a4, 8(sp)
+; RV32IB-ZALRSC-NEXT:    lw a5, 12(sp)
+; RV32IB-ZALRSC-NEXT:    bnez a0, .LBB5_7
+; RV32IB-ZALRSC-NEXT:  .LBB5_2: # %atomicrmw.start
+; RV32IB-ZALRSC-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32IB-ZALRSC-NEXT:    beq a5, s0, .LBB5_4
+; RV32IB-ZALRSC-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB5_2 Depth=1
+; RV32IB-ZALRSC-NEXT:    slt a0, a5, s0
+; RV32IB-ZALRSC-NEXT:    j .LBB5_5
+; RV32IB-ZALRSC-NEXT:  .LBB5_4: # in Loop: Header=BB5_2 Depth=1
+; RV32IB-ZALRSC-NEXT:    sltu a0, a4, s2
+; RV32IB-ZALRSC-NEXT:  .LBB5_5: # %atomicrmw.start
+; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB5_2 Depth=1
+; RV32IB-ZALRSC-NEXT:    mv a2, a4
+; RV32IB-ZALRSC-NEXT:    mv a3, a5
+; RV32IB-ZALRSC-NEXT:    bnez a0, .LBB5_1
+; RV32IB-ZALRSC-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB5_2 Depth=1
+; RV32IB-ZALRSC-NEXT:    mv a2, s2
+; RV32IB-ZALRSC-NEXT:    mv a3, s0
+; RV32IB-ZALRSC-NEXT:    j .LBB5_1
+; RV32IB-ZALRSC-NEXT:  .LBB5_7: # %atomicrmw.end
+; RV32IB-ZALRSC-NEXT:    mv a0, a4
+; RV32IB-ZALRSC-NEXT:    mv a1, a5
+; RV32IB-ZALRSC-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32IB-ZALRSC-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IB-ZALRSC-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IB-ZALRSC-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IB-ZALRSC-NEXT:    addi sp, sp, 32
+; RV32IB-ZALRSC-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_min_i64_seq_cst:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    addi sp, sp, -32
+; RV32IA-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    mv s0, a2
+; RV32IA-NEXT:    mv s1, a0
+; RV32IA-NEXT:    lw a4, 0(a0)
+; RV32IA-NEXT:    lw a5, 4(a0)
+; RV32IA-NEXT:    mv s2, a1
+; RV32IA-NEXT:    j .LBB5_2
+; RV32IA-NEXT:  .LBB5_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB5_2 Depth=1
+; RV32IA-NEXT:    sw a4, 8(sp)
+; RV32IA-NEXT:    sw a5, 12(sp)
+; RV32IA-NEXT:    addi a1, sp, 8
+; RV32IA-NEXT:    li a4, 5
+; RV32IA-NEXT:    li a5, 5
+; RV32IA-NEXT:    mv a0, s1
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
+; RV32IA-NEXT:    lw a4, 8(sp)
+; RV32IA-NEXT:    lw a5, 12(sp)
+; RV32IA-NEXT:    bnez a0, .LBB5_7
+; RV32IA-NEXT:  .LBB5_2: # %atomicrmw.start
+; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:    beq a5, s0, .LBB5_4
+; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB5_2 Depth=1
+; RV32IA-NEXT:    slt a0, s0, a5
+; RV32IA-NEXT:    j .LBB5_5
+; RV32IA-NEXT:  .LBB5_4: # in Loop: Header=BB5_2 Depth=1
+; RV32IA-NEXT:    sltu a0, s2, a4
+; RV32IA-NEXT:  .LBB5_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB5_2 Depth=1
+; RV32IA-NEXT:    mv a2, a4
+; RV32IA-NEXT:    mv a3, a5
+; RV32IA-NEXT:    beqz a0, .LBB5_1
+; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB5_2 Depth=1
+; RV32IA-NEXT:    mv a2, s2
+; RV32IA-NEXT:    mv a3, s0
+; RV32IA-NEXT:    j .LBB5_1
+; RV32IA-NEXT:  .LBB5_7: # %atomicrmw.end
+; RV32IA-NEXT:    mv a0, a4
+; RV32IA-NEXT:    mv a1, a5
+; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    addi sp, sp, 32
+; RV32IA-NEXT:    ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_min_i64_seq_cst:
+; RV64I-ZALRSC:       # %bb.0:
+; RV64I-ZALRSC-NEXT:  .LBB5_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT:    lr.d.aqrl a2, (a0)
+; RV64I-ZALRSC-NEXT:    mv a3, a2
+; RV64I-ZALRSC-NEXT:    bge a1, a3, .LBB5_3
+; RV64I-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB5_1 Depth=1
+; RV64I-ZALRSC-NEXT:    mv a3, a1
+; RV64I-ZALRSC-NEXT:  .LBB5_3: # in Loop: Header=BB5_1 Depth=1
+; RV64I-ZALRSC-NEXT:    sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT:    bnez a3, .LBB5_1
+; RV64I-ZALRSC-NEXT:  # %bb.4:
+; RV64I-ZALRSC-NEXT:    mv a0, a2
+; RV64I-ZALRSC-NEXT:    ret
+;
+; RV64IB-ZALRSC-LABEL: atomicrmw_min_i64_seq_cst:
+; RV64IB-ZALRSC:       # %bb.0:
+; RV64IB-ZALRSC-NEXT:  .LBB5_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-NEXT:    lr.d.aqrl a2, (a0)
+; RV64IB-ZALRSC-NEXT:    min a3, a2, a1
+; RV64IB-ZALRSC-NEXT:    sc.d.rl a3, a3, (a0)
+; RV64IB-ZALRSC-NEXT:    bnez a3, .LBB5_1
+; RV64IB-ZALRSC-NEXT:  # %bb.2:
+; RV64IB-ZALRSC-NEXT:    mv a0, a2
+; RV64IB-ZALRSC-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_min_i64_seq_cst:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    amomin.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT:    ret
+  %1 = atomicrmw min ptr %a, i64 %b seq_cst
+  ret i64 %1
+}
+
+define i64 @atomicrmw_umax_i64_seq_cst(ptr %a, i64 %b) nounwind {
+; RV32I-ZALRSC-LABEL: atomicrmw_umax_i64_seq_cst:
+; RV32I-ZALRSC:       # %bb.0:
+; RV32I-ZALRSC-NEXT:    addi sp, sp, -32
+; RV32I-ZALRSC-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT:    mv s0, a2
+; RV32I-ZALRSC-NEXT:    mv s1, a0
+; RV32I-ZALRSC-NEXT:    lw a4, 0(a0)
+; RV32I-ZALRSC-NEXT:    lw a5, 4(a0)
+; RV32I-ZALRSC-NEXT:    mv s2, a1
+; RV32I-ZALRSC-NEXT:    j .LBB6_2
+; RV32I-ZALRSC-NEXT:  .LBB6_1: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB6_2 Depth=1
+; RV32I-ZALRSC-NEXT:    sw a4, 8(sp)
+; RV32I-ZALRSC-NEXT:    sw a5, 12(sp)
+; RV32I-ZALRSC-NEXT:    addi a1, sp, 8
+; RV32I-ZALRSC-NEXT:    li a4, 5
+; RV32I-ZALRSC-NEXT:    li a5, 5
+; RV32I-ZALRSC-NEXT:    mv a0, s1
+; RV32I-ZALRSC-NEXT:    call __atomic_compare_exchange_8
+; RV32I-ZALRSC-NEXT:    lw a4, 8(sp)
+; RV32I-ZALRSC-NEXT:    lw a5, 12(sp)
+; RV32I-ZALRSC-NEXT:    bnez a0, .LBB6_7
+; RV32I-ZALRSC-NEXT:  .LBB6_2: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT:    beq a5, s0, .LBB6_4
+; RV32I-ZALRSC-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB6_2 Depth=1
+; RV32I-ZALRSC-NEXT:    sltu a0, s0, a5
+; RV32I-ZALRSC-NEXT:    j .LBB6_5
+; RV32I-ZALRSC-NEXT:  .LBB6_4: # in Loop: Header=BB6_2 Depth=1
+; RV32I-ZALRSC-NEXT:    sltu a0, s2, a4
+; RV32I-ZALRSC-NEXT:  .LBB6_5: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB6_2 Depth=1
+; RV32I-ZALRSC-NEXT:    mv a2, a4
+; RV32I-ZALRSC-NEXT:    mv a3, a5
+; RV32I-ZALRSC-NEXT:    bnez a0, .LBB6_1
+; RV32I-ZALRSC-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB6_2 Depth=1
+; RV32I-ZALRSC-NEXT:    mv a2, s2
+; RV32I-ZALRSC-NEXT:    mv a3, s0
+; RV32I-ZALRSC-NEXT:    j .LBB6_1
+; RV32I-ZALRSC-NEXT:  .LBB6_7: # %atomicrmw.end
+; RV32I-ZALRSC-NEXT:    mv a0, a4
+; RV32I-ZALRSC-NEXT:    mv a1, a5
+; RV32I-ZALRSC-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT:    addi sp, sp, 32
+; RV32I-ZALRSC-NEXT:    ret
+;
+; RV32IB-ZALRSC-LABEL: atomicrmw_umax_i64_seq_cst:
+; RV32IB-ZALRSC:       # %bb.0:
+; RV32IB-ZALRSC-NEXT:    addi sp, sp, -32
+; RV32IB-ZALRSC-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IB-ZALRSC-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IB-ZALRSC-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IB-ZALRSC-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32IB-ZALRSC-NEXT:    mv s0, a2
+; RV32IB-ZALRSC-NEXT:    mv s1, a0
+; RV32IB-ZALRSC-NEXT:    lw a4, 0(a0)
+; RV32IB-ZALRSC-NEXT:    lw a5, 4(a0)
+; RV32IB-ZALRSC-NEXT:    mv s2, a1
+; RV32IB-ZALRSC-NEXT:    j .LBB6_2
+; RV32IB-ZALRSC-NEXT:  .LBB6_1: # %atomicrmw.start
+; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB6_2 Depth=1
+; RV32IB-ZALRSC-NEXT:    sw a4, 8(sp)
+; RV32IB-ZALRSC-NEXT:    sw a5, 12(sp)
+; RV32IB-ZALRSC-NEXT:    addi a1, sp, 8
+; RV32IB-ZALRSC-NEXT:    li a4, 5
+; RV32IB-ZALRSC-NEXT:    li a5, 5
+; RV32IB-ZALRSC-NEXT:    mv a0, s1
+; RV32IB-ZALRSC-NEXT:    call __atomic_compare_exchange_8
+; RV32IB-ZALRSC-NEXT:    lw a4, 8(sp)
+; RV32IB-ZALRSC-NEXT:    lw a5, 12(sp)
+; RV32IB-ZALRSC-NEXT:    bnez a0, .LBB6_7
+; RV32IB-ZALRSC-NEXT:  .LBB6_2: # %atomicrmw.start
+; RV32IB-ZALRSC-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32IB-ZALRSC-NEXT:    beq a5, s0, .LBB6_4
+; RV32IB-ZALRSC-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB6_2 Depth=1
+; RV32IB-ZALRSC-NEXT:    sltu a0, s0, a5
+; RV32IB-ZALRSC-NEXT:    j .LBB6_5
+; RV32IB-ZALRSC-NEXT:  .LBB6_4: # in Loop: Header=BB6_2 Depth=1
+; RV32IB-ZALRSC-NEXT:    sltu a0, s2, a4
+; RV32IB-ZALRSC-NEXT:  .LBB6_5: # %atomicrmw.start
+; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB6_2 Depth=1
+; RV32IB-ZALRSC-NEXT:    mv a2, a4
+; RV32IB-ZALRSC-NEXT:    mv a3, a5
+; RV32IB-ZALRSC-NEXT:    bnez a0, .LBB6_1
+; RV32IB-ZALRSC-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB6_2 Depth=1
+; RV32IB-ZALRSC-NEXT:    mv a2, s2
+; RV32IB-ZALRSC-NEXT:    mv a3, s0
+; RV32IB-ZALRSC-NEXT:    j .LBB6_1
+; RV32IB-ZALRSC-NEXT:  .LBB6_7: # %atomicrmw.end
+; RV32IB-ZALRSC-NEXT:    mv a0, a4
+; RV32IB-ZALRSC-NEXT:    mv a1, a5
+; RV32IB-ZALRSC-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32IB-ZALRSC-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IB-ZALRSC-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IB-ZALRSC-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IB-ZALRSC-NEXT:    addi sp, sp, 32
+; RV32IB-ZALRSC-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_umax_i64_seq_cst:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    addi sp, sp, -32
+; RV32IA-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    mv s0, a2
+; RV32IA-NEXT:    mv s1, a0
+; RV32IA-NEXT:    lw a4, 0(a0)
+; RV32IA-NEXT:    lw a5, 4(a0)
+; RV32IA-NEXT:    mv s2, a1
+; RV32IA-NEXT:    j .LBB6_2
+; RV32IA-NEXT:  .LBB6_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB6_2 Depth=1
+; RV32IA-NEXT:    sw a4, 8(sp)
+; RV32IA-NEXT:    sw a5, 12(sp)
+; RV32IA-NEXT:    addi a1, sp, 8
+; RV32IA-NEXT:    li a4, 5
+; RV32IA-NEXT:    li a5, 5
+; RV32IA-NEXT:    mv a0, s1
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
+; RV32IA-NEXT:    lw a4, 8(sp)
+; RV32IA-NEXT:    lw a5, 12(sp)
+; RV32IA-NEXT:    bnez a0, .LBB6_7
+; RV32IA-NEXT:  .LBB6_2: # %atomicrmw.start
+; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:    beq a5, s0, .LBB6_4
+; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB6_2 Depth=1
+; RV32IA-NEXT:    sltu a0, s0, a5
+; RV32IA-NEXT:    j .LBB6_5
+; RV32IA-NEXT:  .LBB6_4: # in Loop: Header=BB6_2 Depth=1
+; RV32IA-NEXT:    sltu a0, s2, a4
+; RV32IA-NEXT:  .LBB6_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB6_2 Depth=1
+; RV32IA-NEXT:    mv a2, a4
+; RV32IA-NEXT:    mv a3, a5
+; RV32IA-NEXT:    bnez a0, .LBB6_1
+; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB6_2 Depth=1
+; RV32IA-NEXT:    mv a2, s2
+; RV32IA-NEXT:    mv a3, s0
+; RV32IA-NEXT:    j .LBB6_1
+; RV32IA-NEXT:  .LBB6_7: # %atomicrmw.end
+; RV32IA-NEXT:    mv a0, a4
+; RV32IA-NEXT:    mv a1, a5
+; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    addi sp, sp, 32
+; RV32IA-NEXT:    ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_umax_i64_seq_cst:
+; RV64I-ZALRSC:       # %bb.0:
+; RV64I-ZALRSC-NEXT:  .LBB6_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT:    lr.d.aqrl a2, (a0)
+; RV64I-ZALRSC-NEXT:    mv a3, a2
+; RV64I-ZALRSC-NEXT:    bgeu a3, a1, .LBB6_3
+; RV64I-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB6_1 Depth=1
+; RV64I-ZALRSC-NEXT:    mv a3, a1
+; RV64I-ZALRSC-NEXT:  .LBB6_3: # in Loop: Header=BB6_1 Depth=1
+; RV64I-ZALRSC-NEXT:    sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT:    bnez a3, .LBB6_1
+; RV64I-ZALRSC-NEXT:  # %bb.4:
+; RV64I-ZALRSC-NEXT:    mv a0, a2
+; RV64I-ZALRSC-NEXT:    ret
+;
+; RV64IB-ZALRSC-LABEL: atomicrmw_umax_i64_seq_cst:
+; RV64IB-ZALRSC:       # %bb.0:
+; RV64IB-ZALRSC-NEXT:  .LBB6_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-NEXT:    lr.d.aqrl a2, (a0)
+; RV64IB-ZALRSC-NEXT:    maxu a3, a2, a1
+; RV64IB-ZALRSC-NEXT:    sc.d.rl a3, a3, (a0)
+; RV64IB-ZALRSC-NEXT:    bnez a3, .LBB6_1
+; RV64IB-ZALRSC-NEXT:  # %bb.2:
+; RV64IB-ZALRSC-NEXT:    mv a0, a2
+; RV64IB-ZALRSC-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i64_seq_cst:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    amomaxu.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT:    ret
+  %1 = atomicrmw umax ptr %a, i64 %b seq_cst
+  ret i64 %1
+}
+
+define i64 @atomicrmw_umin_i64_seq_cst(ptr %a, i64 %b) nounwind {
+; RV32I-ZALRSC-LABEL: atomicrmw_umin_i64_seq_cst:
+; RV32I-ZALRSC:       # %bb.0:
+; RV32I-ZALRSC-NEXT:    addi sp, sp, -32
+; RV32I-ZALRSC-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT:    mv s0, a2
+; RV32I-ZALRSC-NEXT:    mv s1, a0
+; RV32I-ZALRSC-NEXT:    lw a4, 0(a0)
+; RV32I-ZALRSC-NEXT:    lw a5, 4(a0)
+; RV32I-ZALRSC-NEXT:    mv s2, a1
+; RV32I-ZALRSC-NEXT:    j .LBB7_2
+; RV32I-ZALRSC-NEXT:  .LBB7_1: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB7_2 Depth=1
+; RV32I-ZALRSC-NEXT:    sw a4, 8(sp)
+; RV32I-ZALRSC-NEXT:    sw a5, 12(sp)
+; RV32I-ZALRSC-NEXT:    addi a1, sp, 8
+; RV32I-ZALRSC-NEXT:    li a4, 5
+; RV32I-ZALRSC-NEXT:    li a5, 5
+; RV32I-ZALRSC-NEXT:    mv a0, s1
+; RV32I-ZALRSC-NEXT:    call __atomic_compare_exchange_8
+; RV32I-ZALRSC-NEXT:    lw a4, 8(sp)
+; RV32I-ZALRSC-NEXT:    lw a5, 12(sp)
+; RV32I-ZALRSC-NEXT:    bnez a0, .LBB7_7
+; RV32I-ZALRSC-NEXT:  .LBB7_2: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT:    beq a5, s0, .LBB7_4
+; RV32I-ZALRSC-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB7_2 Depth=1
+; RV32I-ZALRSC-NEXT:    sltu a0, s0, a5
+; RV32I-ZALRSC-NEXT:    j .LBB7_5
+; RV32I-ZALRSC-NEXT:  .LBB7_4: # in Loop: Header=BB7_2 Depth=1
+; RV32I-ZALRSC-NEXT:    sltu a0, s2, a4
+; RV32I-ZALRSC-NEXT:  .LBB7_5: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB7_2 Depth=1
+; RV32I-ZALRSC-NEXT:    mv a2, a4
+; RV32I-ZALRSC-NEXT:    mv a3, a5
+; RV32I-ZALRSC-NEXT:    beqz a0, .LBB7_1
+; RV32I-ZALRSC-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB7_2 Depth=1
+; RV32I-ZALRSC-NEXT:    mv a2, s2
+; RV32I-ZALRSC-NEXT:    mv a3, s0
+; RV32I-ZALRSC-NEXT:    j .LBB7_1
+; RV32I-ZALRSC-NEXT:  .LBB7_7: # %atomicrmw.end
+; RV32I-ZALRSC-NEXT:    mv a0, a4
+; RV32I-ZALRSC-NEXT:    mv a1, a5
+; RV32I-ZALRSC-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT:    addi sp, sp, 32
+; RV32I-ZALRSC-NEXT:    ret
+;
+; RV32IB-ZALRSC-LABEL: atomicrmw_umin_i64_seq_cst:
+; RV32IB-ZALRSC:       # %bb.0:
+; RV32IB-ZALRSC-NEXT:    addi sp, sp, -32
+; RV32IB-ZALRSC-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IB-ZALRSC-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IB-ZALRSC-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IB-ZALRSC-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32IB-ZALRSC-NEXT:    mv s0, a2
+; RV32IB-ZALRSC-NEXT:    mv s1, a0
+; RV32IB-ZALRSC-NEXT:    lw a4, 0(a0)
+; RV32IB-ZALRSC-NEXT:    lw a5, 4(a0)
+; RV32IB-ZALRSC-NEXT:    mv s2, a1
+; RV32IB-ZALRSC-NEXT:    j .LBB7_2
+; RV32IB-ZALRSC-NEXT:  .LBB7_1: # %atomicrmw.start
+; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB7_2 Depth=1
+; RV32IB-ZALRSC-NEXT:    sw a4, 8(sp)
+; RV32IB-ZALRSC-NEXT:    sw a5, 12(sp)
+; RV32IB-ZALRSC-NEXT:    addi a1, sp, 8
+; RV32IB-ZALRSC-NEXT:    li a4, 5
+; RV32IB-ZALRSC-NEXT:    li a5, 5
+; RV32IB-ZALRSC-NEXT:    mv a0, s1
+; RV32IB-ZALRSC-NEXT:    call __atomic_compare_exchange_8
+; RV32IB-ZALRSC-NEXT:    lw a4, 8(sp)
+; RV32IB-ZALRSC-NEXT:    lw a5, 12(sp)
+; RV32IB-ZALRSC-NEXT:    bnez a0, .LBB7_7
+; RV32IB-ZALRSC-NEXT:  .LBB7_2: # %atomicrmw.start
+; RV32IB-ZALRSC-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32IB-ZALRSC-NEXT:    beq a5, s0, .LBB7_4
+; RV32IB-ZALRSC-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB7_2 Depth=1
+; RV32IB-ZALRSC-NEXT:    sltu a0, a5, s0
+; RV32IB-ZALRSC-NEXT:    j .LBB7_5
+; RV32IB-ZALRSC-NEXT:  .LBB7_4: # in Loop: Header=BB7_2 Depth=1
+; RV32IB-ZALRSC-NEXT:    sltu a0, a4, s2
+; RV32IB-ZALRSC-NEXT:  .LBB7_5: # %atomicrmw.start
+; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB7_2 Depth=1
+; RV32IB-ZALRSC-NEXT:    mv a2, a4
+; RV32IB-ZALRSC-NEXT:    mv a3, a5
+; RV32IB-ZALRSC-NEXT:    bnez a0, .LBB7_1
+; RV32IB-ZALRSC-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB7_2 Depth=1
+; RV32IB-ZALRSC-NEXT:    mv a2, s2
+; RV32IB-ZALRSC-NEXT:    mv a3, s0
+; RV32IB-ZALRSC-NEXT:    j .LBB7_1
+; RV32IB-ZALRSC-NEXT:  .LBB7_7: # %atomicrmw.end
+; RV32IB-ZALRSC-NEXT:    mv a0, a4
+; RV32IB-ZALRSC-NEXT:    mv a1, a5
+; RV32IB-ZALRSC-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32IB-ZALRSC-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IB-ZALRSC-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IB-ZALRSC-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IB-ZALRSC-NEXT:    addi sp, sp, 32
+; RV32IB-ZALRSC-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_umin_i64_seq_cst:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    addi sp, sp, -32
+; RV32IA-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32IA-NEXT:    mv s0, a2
+; RV32IA-NEXT:    mv s1, a0
+; RV32IA-NEXT:    lw a4, 0(a0)
+; RV32IA-NEXT:    lw a5, 4(a0)
+; RV32IA-NEXT:    mv s2, a1
+; RV32IA-NEXT:    j .LBB7_2
+; RV32IA-NEXT:  .LBB7_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB7_2 Depth=1
+; RV32IA-NEXT:    sw a4, 8(sp)
+; RV32IA-NEXT:    sw a5, 12(sp)
+; RV32IA-NEXT:    addi a1, sp, 8
+; RV32IA-NEXT:    li a4, 5
+; RV32IA-NEXT:    li a5, 5
+; RV32IA-NEXT:    mv a0, s1
+; RV32IA-NEXT:    call __atomic_compare_exchange_8
+; RV32IA-NEXT:    lw a4, 8(sp)
+; RV32IA-NEXT:    lw a5, 12(sp)
+; RV32IA-NEXT:    bnez a0, .LBB7_7
+; RV32IA-NEXT:  .LBB7_2: # %atomicrmw.start
+; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:    beq a5, s0, .LBB7_4
+; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB7_2 Depth=1
+; RV32IA-NEXT:    sltu a0, s0, a5
+; RV32IA-NEXT:    j .LBB7_5
+; RV32IA-NEXT:  .LBB7_4: # in Loop: Header=BB7_2 Depth=1
+; RV32IA-NEXT:    sltu a0, s2, a4
+; RV32IA-NEXT:  .LBB7_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB7_2 Depth=1
+; RV32IA-NEXT:    mv a2, a4
+; RV32IA-NEXT:    mv a3, a5
+; RV32IA-NEXT:    beqz a0, .LBB7_1
+; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB7_2 Depth=1
+; RV32IA-NEXT:    mv a2, s2
+; RV32IA-NEXT:    mv a3, s0
+; RV32IA-NEXT:    j .LBB7_1
+; RV32IA-NEXT:  .LBB7_7: # %atomicrmw.end
+; RV32IA-NEXT:    mv a0, a4
+; RV32IA-NEXT:    mv a1, a5
+; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IA-NEXT:    addi sp, sp, 32
+; RV32IA-NEXT:    ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_umin_i64_seq_cst:
+; RV64I-ZALRSC:       # %bb.0:
+; RV64I-ZALRSC-NEXT:  .LBB7_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT:    lr.d.aqrl a2, (a0)
+; RV64I-ZALRSC-NEXT:    mv a3, a2
+; RV64I-ZALRSC-NEXT:    bgeu a1, a3, .LBB7_3
+; RV64I-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB7_1 Depth=1
+; RV64I-ZALRSC-NEXT:    mv a3, a1
+; RV64I-ZALRSC-NEXT:  .LBB7_3: # in Loop: Header=BB7_1 Depth=1
+; RV64I-ZALRSC-NEXT:    sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT:    bnez a3, .LBB7_1
+; RV64I-ZALRSC-NEXT:  # %bb.4:
+; RV64I-ZALRSC-NEXT:    mv a0, a2
+; RV64I-ZALRSC-NEXT:    ret
+;
+; RV64IB-ZALRSC-LABEL: atomicrmw_umin_i64_seq_cst:
+; RV64IB-ZALRSC:       # %bb.0:
+; RV64IB-ZALRSC-NEXT:  .LBB7_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-NEXT:    lr.d.aqrl a2, (a0)
+; RV64IB-ZALRSC-NEXT:    minu a3, a2, a1
+; RV64IB-ZALRSC-NEXT:    sc.d.rl a3, a3, (a0)
+; RV64IB-ZALRSC-NEXT:    bnez a3, .LBB7_1
+; RV64IB-ZALRSC-NEXT:  # %bb.2:
+; RV64IB-ZALRSC-NEXT:    mv a0, a2
+; RV64IB-ZALRSC-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i64_seq_cst:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    amominu.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT:    ret
+  %1 = atomicrmw umin ptr %a, i64 %b seq_cst
+  ret i64 %1
+}
diff --git a/llvm/test/CodeGen/RISCV/features-info.ll b/llvm/test/CodeGen/RISCV/features-info.ll
index 5e5f2b78e8869..37e11dbb12731 100644
--- a/llvm/test/CodeGen/RISCV/features-info.ll
+++ b/llvm/test/CodeGen/RISCV/features-info.ll
@@ -81,6 +81,7 @@
 ; CHECK-NEXT:   optimized-nf7-segment-load-store - vlseg7eN.v and vsseg7eN.v are implemented as a wide memory op and shuffle.
 ; CHECK-NEXT:   optimized-nf8-segment-load-store - vlseg8eN.v and vsseg8eN.v are implemented as a wide memory op and shuffle.
 ; CHECK-NEXT:   optimized-zero-stride-load       - Optimized (perform fewer memory operations)zero-stride vector load.
+; CHECK-NEXT:   permissive-zalrsc                - Implementation permits non-base instructions between LR/SC pairs.
 ; CHECK-NEXT:   predictable-select-expensive     - Prefer likely predicted branches over selects.
 ; CHECK-NEXT:   prefer-vsetvli-over-read-vlenb   - Prefer vsetvli over read vlenb CSR to calculate VLEN.
 ; CHECK-NEXT:   prefer-w-inst                    - Prefer instructions with W suffix.
>From 21a358e221a66ce4202539f33a6683e7d9f3a285 Mon Sep 17 00:00:00 2001
From: Stephan Lachowsky <slachowsky at apple.com>
Date: Fri, 24 Oct 2025 14:31:47 -0700
Subject: [PATCH 2/4] Fix RISC-V reference in comment
---
 llvm/lib/Target/RISCV/RISCVFeatures.td | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index 3c1e9665d823e..f01455c3e1974 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -1906,7 +1906,7 @@ def FeatureForcedAtomics : SubtargetFeature<
 def HasAtomicLdSt
     : Predicate<"Subtarget->hasStdExtZalrsc() || Subtarget->hasForcedAtomics()">;
 
-// The RISCV Unprivileged Architecture defines _constrained_ LR/SC loops:
+// The RISC-V Unprivileged Architecture defines _constrained_ LR/SC loops:
 //   The dynamic code executed between the LR and SC instructions can only
 //   contain instructions from the base ''I'' instruction set, excluding loads,
 //   stores, backward jumps, taken backward branches, JALR, FENCE, and SYSTEM
>From 84c01c16b17679de964b5be14186b35617586f20 Mon Sep 17 00:00:00 2001
From: Stephan Lachowsky <slachowsky at apple.com>
Date: Fri, 24 Oct 2025 17:58:46 -0700
Subject: [PATCH 3/4] RISCVFeatures.td: cite ISA manual section for constrained
 LR/SC loops and make permissive-zalrsc imply Zalrsc
---
 llvm/lib/Target/RISCV/RISCVFeatures.td | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index f01455c3e1974..baecb6ef94fcb 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -1906,7 +1906,10 @@ def FeatureForcedAtomics : SubtargetFeature<
 def HasAtomicLdSt
     : Predicate<"Subtarget->hasStdExtZalrsc() || Subtarget->hasForcedAtomics()">;
 
-// The RISC-V Unprivileged Architecture defines _constrained_ LR/SC loops:
+// The RISC-V Unprivileged Architecture - ISA Volume 1 (Version: 20250508)
+// [https://docs.riscv.org/reference/isa/_attachments/riscv-unprivileged.pdf]
+// in section 13.3. Eventual Success of Store-Conditional Instructions, defines
+// _constrained_ LR/SC loops:
 //   The dynamic code executed between the LR and SC instructions can only
 //   contain instructions from the base ''I'' instruction set, excluding loads,
 //   stores, backward jumps, taken backward branches, JALR, FENCE, and SYSTEM
@@ -1920,7 +1923,8 @@ def HasAtomicLdSt
 def FeaturePermissiveZalrsc
     : SubtargetFeature<
           "permissive-zalrsc", "HasPermissiveZalrsc", "true",
-          "Implementation permits non-base instructions between LR/SC pairs">;
+          "Implementation permits non-base instructions between LR/SC pairs",
+          [FeatureStdExtZalrsc]>;
 
 def FeatureTaggedGlobals : SubtargetFeature<"tagged-globals",
     "AllowTaggedGlobals",
>From c6566895ce3cbdfbd90a899fc221edfb7ba6e492 Mon Sep 17 00:00:00 2001
From: Stephan Lachowsky <slachowsky at apple.com>
Date: Fri, 24 Oct 2025 18:04:36 -0700
Subject: [PATCH 4/4] atomic-rmw-minmax.ll: consolidate RUN lines into
 zalrsc/permissive-zalrsc/A configurations and regenerate checks
---
 llvm/test/CodeGen/RISCV/atomic-rmw-minmax.ll | 1392 ++++++------------
 1 file changed, 480 insertions(+), 912 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw-minmax.ll b/llvm/test/CodeGen/RISCV/atomic-rmw-minmax.ll
index 9ce987c1add50..dae153fe87cc0 100644
--- a/llvm/test/CodeGen/RISCV/atomic-rmw-minmax.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-rmw-minmax.ll
@@ -1,1074 +1,642 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+zalrsc -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefixes=RV32I-ZALRSC %s
-; RUN: llc -mtriple=riscv32 -mattr=+b,+zalrsc,+permissive-zalrsc -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefixes=RV32IB-ZALRSC %s
-; RUN: llc -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefixes=RV32IA %s
-;
-; RUN: llc -mtriple=riscv64 -mattr=+zalrsc -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefixes=RV64I-ZALRSC %s
-; RUN: llc -mtriple=riscv64 -mattr=+b,+zalrsc,+permissive-zalrsc -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+b,+zalrsc -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=RV32IB-COMMON,RV32IB-ZALRSC %s
+; RUN: llc -mtriple=riscv32 -mattr=+b,+permissive-zalrsc -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=RV32IB-COMMON,RV32IB-ZALRSC-PERM %s
+; RUN: llc -mtriple=riscv32 -mattr=+b,+permissive-zalrsc,+a -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=RV32IB-COMMON,RV32IAB %s
+;
+; RUN: llc -mtriple=riscv64 -mattr=+b,+zalrsc -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefixes=RV64IB-ZALRSC %s
-; RUN: llc -mtriple=riscv64 -mattr=+a -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefixes=RV64IA %s
+; RUN: llc -mtriple=riscv64 -mattr=+b,+permissive-zalrsc -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=RV64IB-ZALRSC-PERM %s
+; RUN: llc -mtriple=riscv64 -mattr=+b,+permissive-zalrsc,+a -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=RV64IAB %s
 
 define i32 @atomicrmw_max_i32_seq_cst(ptr %a, i32 %b) nounwind {
-; RV32I-ZALRSC-LABEL: atomicrmw_max_i32_seq_cst:
-; RV32I-ZALRSC:       # %bb.0:
-; RV32I-ZALRSC-NEXT:  .LBB0_1: # =>This Inner Loop Header: Depth=1
-; RV32I-ZALRSC-NEXT:    lr.w.aqrl a2, (a0)
-; RV32I-ZALRSC-NEXT:    mv a3, a2
-; RV32I-ZALRSC-NEXT:    bge a3, a1, .LBB0_3
-; RV32I-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB0_1 Depth=1
-; RV32I-ZALRSC-NEXT:    mv a3, a1
-; RV32I-ZALRSC-NEXT:  .LBB0_3: # in Loop: Header=BB0_1 Depth=1
-; RV32I-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
-; RV32I-ZALRSC-NEXT:    bnez a3, .LBB0_1
-; RV32I-ZALRSC-NEXT:  # %bb.4:
-; RV32I-ZALRSC-NEXT:    mv a0, a2
-; RV32I-ZALRSC-NEXT:    ret
-;
 ; RV32IB-ZALRSC-LABEL: atomicrmw_max_i32_seq_cst:
 ; RV32IB-ZALRSC:       # %bb.0:
 ; RV32IB-ZALRSC-NEXT:  .LBB0_1: # =>This Inner Loop Header: Depth=1
 ; RV32IB-ZALRSC-NEXT:    lr.w.aqrl a2, (a0)
-; RV32IB-ZALRSC-NEXT:    max a3, a2, a1
+; RV32IB-ZALRSC-NEXT:    mv a3, a2
+; RV32IB-ZALRSC-NEXT:    bge a3, a1, .LBB0_3
+; RV32IB-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB0_1 Depth=1
+; RV32IB-ZALRSC-NEXT:    mv a3, a1
+; RV32IB-ZALRSC-NEXT:  .LBB0_3: # in Loop: Header=BB0_1 Depth=1
 ; RV32IB-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
 ; RV32IB-ZALRSC-NEXT:    bnez a3, .LBB0_1
-; RV32IB-ZALRSC-NEXT:  # %bb.2:
+; RV32IB-ZALRSC-NEXT:  # %bb.4:
 ; RV32IB-ZALRSC-NEXT:    mv a0, a2
 ; RV32IB-ZALRSC-NEXT:    ret
 ;
-; RV32IA-LABEL: atomicrmw_max_i32_seq_cst:
-; RV32IA:       # %bb.0:
-; RV32IA-NEXT:    amomax.w.aqrl a0, a1, (a0)
-; RV32IA-NEXT:    ret
-;
-; RV64I-ZALRSC-LABEL: atomicrmw_max_i32_seq_cst:
-; RV64I-ZALRSC:       # %bb.0:
-; RV64I-ZALRSC-NEXT:    sext.w a2, a1
-; RV64I-ZALRSC-NEXT:  .LBB0_1: # =>This Inner Loop Header: Depth=1
-; RV64I-ZALRSC-NEXT:    lr.w.aqrl a1, (a0)
-; RV64I-ZALRSC-NEXT:    mv a3, a1
-; RV64I-ZALRSC-NEXT:    bge a3, a2, .LBB0_3
-; RV64I-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB0_1 Depth=1
-; RV64I-ZALRSC-NEXT:    mv a3, a2
-; RV64I-ZALRSC-NEXT:  .LBB0_3: # in Loop: Header=BB0_1 Depth=1
-; RV64I-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
-; RV64I-ZALRSC-NEXT:    bnez a3, .LBB0_1
-; RV64I-ZALRSC-NEXT:  # %bb.4:
-; RV64I-ZALRSC-NEXT:    mv a0, a1
-; RV64I-ZALRSC-NEXT:    ret
+; RV32IB-ZALRSC-PERM-LABEL: atomicrmw_max_i32_seq_cst:
+; RV32IB-ZALRSC-PERM:       # %bb.0:
+; RV32IB-ZALRSC-PERM-NEXT:  .LBB0_1: # =>This Inner Loop Header: Depth=1
+; RV32IB-ZALRSC-PERM-NEXT:    lr.w.aqrl a2, (a0)
+; RV32IB-ZALRSC-PERM-NEXT:    max a3, a2, a1
+; RV32IB-ZALRSC-PERM-NEXT:    sc.w.rl a3, a3, (a0)
+; RV32IB-ZALRSC-PERM-NEXT:    bnez a3, .LBB0_1
+; RV32IB-ZALRSC-PERM-NEXT:  # %bb.2:
+; RV32IB-ZALRSC-PERM-NEXT:    mv a0, a2
+; RV32IB-ZALRSC-PERM-NEXT:    ret
+;
+; RV32IAB-LABEL: atomicrmw_max_i32_seq_cst:
+; RV32IAB:       # %bb.0:
+; RV32IAB-NEXT:    amomax.w.aqrl a0, a1, (a0)
+; RV32IAB-NEXT:    ret
 ;
 ; RV64IB-ZALRSC-LABEL: atomicrmw_max_i32_seq_cst:
 ; RV64IB-ZALRSC:       # %bb.0:
 ; RV64IB-ZALRSC-NEXT:    sext.w a2, a1
 ; RV64IB-ZALRSC-NEXT:  .LBB0_1: # =>This Inner Loop Header: Depth=1
 ; RV64IB-ZALRSC-NEXT:    lr.w.aqrl a1, (a0)
-; RV64IB-ZALRSC-NEXT:    max a3, a1, a2
+; RV64IB-ZALRSC-NEXT:    mv a3, a1
+; RV64IB-ZALRSC-NEXT:    bge a3, a2, .LBB0_3
+; RV64IB-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB0_1 Depth=1
+; RV64IB-ZALRSC-NEXT:    mv a3, a2
+; RV64IB-ZALRSC-NEXT:  .LBB0_3: # in Loop: Header=BB0_1 Depth=1
 ; RV64IB-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
 ; RV64IB-ZALRSC-NEXT:    bnez a3, .LBB0_1
-; RV64IB-ZALRSC-NEXT:  # %bb.2:
+; RV64IB-ZALRSC-NEXT:  # %bb.4:
 ; RV64IB-ZALRSC-NEXT:    mv a0, a1
 ; RV64IB-ZALRSC-NEXT:    ret
 ;
-; RV64IA-LABEL: atomicrmw_max_i32_seq_cst:
-; RV64IA:       # %bb.0:
-; RV64IA-NEXT:    amomax.w.aqrl a0, a1, (a0)
-; RV64IA-NEXT:    ret
+; RV64IB-ZALRSC-PERM-LABEL: atomicrmw_max_i32_seq_cst:
+; RV64IB-ZALRSC-PERM:       # %bb.0:
+; RV64IB-ZALRSC-PERM-NEXT:    sext.w a2, a1
+; RV64IB-ZALRSC-PERM-NEXT:  .LBB0_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-PERM-NEXT:    lr.w.aqrl a1, (a0)
+; RV64IB-ZALRSC-PERM-NEXT:    max a3, a1, a2
+; RV64IB-ZALRSC-PERM-NEXT:    sc.w.rl a3, a3, (a0)
+; RV64IB-ZALRSC-PERM-NEXT:    bnez a3, .LBB0_1
+; RV64IB-ZALRSC-PERM-NEXT:  # %bb.2:
+; RV64IB-ZALRSC-PERM-NEXT:    mv a0, a1
+; RV64IB-ZALRSC-PERM-NEXT:    ret
+;
+; RV64IAB-LABEL: atomicrmw_max_i32_seq_cst:
+; RV64IAB:       # %bb.0:
+; RV64IAB-NEXT:    amomax.w.aqrl a0, a1, (a0)
+; RV64IAB-NEXT:    ret
   %1 = atomicrmw max ptr %a, i32 %b seq_cst
   ret i32 %1
 }
 
 define i32 @atomicrmw_min_i32_seq_cst(ptr %a, i32 %b) nounwind {
-; RV32I-ZALRSC-LABEL: atomicrmw_min_i32_seq_cst:
-; RV32I-ZALRSC:       # %bb.0:
-; RV32I-ZALRSC-NEXT:  .LBB1_1: # =>This Inner Loop Header: Depth=1
-; RV32I-ZALRSC-NEXT:    lr.w.aqrl a2, (a0)
-; RV32I-ZALRSC-NEXT:    mv a3, a2
-; RV32I-ZALRSC-NEXT:    bge a1, a3, .LBB1_3
-; RV32I-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB1_1 Depth=1
-; RV32I-ZALRSC-NEXT:    mv a3, a1
-; RV32I-ZALRSC-NEXT:  .LBB1_3: # in Loop: Header=BB1_1 Depth=1
-; RV32I-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
-; RV32I-ZALRSC-NEXT:    bnez a3, .LBB1_1
-; RV32I-ZALRSC-NEXT:  # %bb.4:
-; RV32I-ZALRSC-NEXT:    mv a0, a2
-; RV32I-ZALRSC-NEXT:    ret
-;
 ; RV32IB-ZALRSC-LABEL: atomicrmw_min_i32_seq_cst:
 ; RV32IB-ZALRSC:       # %bb.0:
 ; RV32IB-ZALRSC-NEXT:  .LBB1_1: # =>This Inner Loop Header: Depth=1
 ; RV32IB-ZALRSC-NEXT:    lr.w.aqrl a2, (a0)
-; RV32IB-ZALRSC-NEXT:    min a3, a2, a1
+; RV32IB-ZALRSC-NEXT:    mv a3, a2
+; RV32IB-ZALRSC-NEXT:    bge a1, a3, .LBB1_3
+; RV32IB-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB1_1 Depth=1
+; RV32IB-ZALRSC-NEXT:    mv a3, a1
+; RV32IB-ZALRSC-NEXT:  .LBB1_3: # in Loop: Header=BB1_1 Depth=1
 ; RV32IB-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
 ; RV32IB-ZALRSC-NEXT:    bnez a3, .LBB1_1
-; RV32IB-ZALRSC-NEXT:  # %bb.2:
+; RV32IB-ZALRSC-NEXT:  # %bb.4:
 ; RV32IB-ZALRSC-NEXT:    mv a0, a2
 ; RV32IB-ZALRSC-NEXT:    ret
 ;
-; RV32IA-LABEL: atomicrmw_min_i32_seq_cst:
-; RV32IA:       # %bb.0:
-; RV32IA-NEXT:    amomin.w.aqrl a0, a1, (a0)
-; RV32IA-NEXT:    ret
-;
-; RV64I-ZALRSC-LABEL: atomicrmw_min_i32_seq_cst:
-; RV64I-ZALRSC:       # %bb.0:
-; RV64I-ZALRSC-NEXT:    sext.w a2, a1
-; RV64I-ZALRSC-NEXT:  .LBB1_1: # =>This Inner Loop Header: Depth=1
-; RV64I-ZALRSC-NEXT:    lr.w.aqrl a1, (a0)
-; RV64I-ZALRSC-NEXT:    mv a3, a1
-; RV64I-ZALRSC-NEXT:    bge a2, a3, .LBB1_3
-; RV64I-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB1_1 Depth=1
-; RV64I-ZALRSC-NEXT:    mv a3, a2
-; RV64I-ZALRSC-NEXT:  .LBB1_3: # in Loop: Header=BB1_1 Depth=1
-; RV64I-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
-; RV64I-ZALRSC-NEXT:    bnez a3, .LBB1_1
-; RV64I-ZALRSC-NEXT:  # %bb.4:
-; RV64I-ZALRSC-NEXT:    mv a0, a1
-; RV64I-ZALRSC-NEXT:    ret
+; RV32IB-ZALRSC-PERM-LABEL: atomicrmw_min_i32_seq_cst:
+; RV32IB-ZALRSC-PERM:       # %bb.0:
+; RV32IB-ZALRSC-PERM-NEXT:  .LBB1_1: # =>This Inner Loop Header: Depth=1
+; RV32IB-ZALRSC-PERM-NEXT:    lr.w.aqrl a2, (a0)
+; RV32IB-ZALRSC-PERM-NEXT:    min a3, a2, a1
+; RV32IB-ZALRSC-PERM-NEXT:    sc.w.rl a3, a3, (a0)
+; RV32IB-ZALRSC-PERM-NEXT:    bnez a3, .LBB1_1
+; RV32IB-ZALRSC-PERM-NEXT:  # %bb.2:
+; RV32IB-ZALRSC-PERM-NEXT:    mv a0, a2
+; RV32IB-ZALRSC-PERM-NEXT:    ret
+;
+; RV32IAB-LABEL: atomicrmw_min_i32_seq_cst:
+; RV32IAB:       # %bb.0:
+; RV32IAB-NEXT:    amomin.w.aqrl a0, a1, (a0)
+; RV32IAB-NEXT:    ret
 ;
 ; RV64IB-ZALRSC-LABEL: atomicrmw_min_i32_seq_cst:
 ; RV64IB-ZALRSC:       # %bb.0:
 ; RV64IB-ZALRSC-NEXT:    sext.w a2, a1
 ; RV64IB-ZALRSC-NEXT:  .LBB1_1: # =>This Inner Loop Header: Depth=1
 ; RV64IB-ZALRSC-NEXT:    lr.w.aqrl a1, (a0)
-; RV64IB-ZALRSC-NEXT:    min a3, a1, a2
+; RV64IB-ZALRSC-NEXT:    mv a3, a1
+; RV64IB-ZALRSC-NEXT:    bge a2, a3, .LBB1_3
+; RV64IB-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB1_1 Depth=1
+; RV64IB-ZALRSC-NEXT:    mv a3, a2
+; RV64IB-ZALRSC-NEXT:  .LBB1_3: # in Loop: Header=BB1_1 Depth=1
 ; RV64IB-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
 ; RV64IB-ZALRSC-NEXT:    bnez a3, .LBB1_1
-; RV64IB-ZALRSC-NEXT:  # %bb.2:
+; RV64IB-ZALRSC-NEXT:  # %bb.4:
 ; RV64IB-ZALRSC-NEXT:    mv a0, a1
 ; RV64IB-ZALRSC-NEXT:    ret
 ;
-; RV64IA-LABEL: atomicrmw_min_i32_seq_cst:
-; RV64IA:       # %bb.0:
-; RV64IA-NEXT:    amomin.w.aqrl a0, a1, (a0)
-; RV64IA-NEXT:    ret
+; RV64IB-ZALRSC-PERM-LABEL: atomicrmw_min_i32_seq_cst:
+; RV64IB-ZALRSC-PERM:       # %bb.0:
+; RV64IB-ZALRSC-PERM-NEXT:    sext.w a2, a1
+; RV64IB-ZALRSC-PERM-NEXT:  .LBB1_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-PERM-NEXT:    lr.w.aqrl a1, (a0)
+; RV64IB-ZALRSC-PERM-NEXT:    min a3, a1, a2
+; RV64IB-ZALRSC-PERM-NEXT:    sc.w.rl a3, a3, (a0)
+; RV64IB-ZALRSC-PERM-NEXT:    bnez a3, .LBB1_1
+; RV64IB-ZALRSC-PERM-NEXT:  # %bb.2:
+; RV64IB-ZALRSC-PERM-NEXT:    mv a0, a1
+; RV64IB-ZALRSC-PERM-NEXT:    ret
+;
+; RV64IAB-LABEL: atomicrmw_min_i32_seq_cst:
+; RV64IAB:       # %bb.0:
+; RV64IAB-NEXT:    amomin.w.aqrl a0, a1, (a0)
+; RV64IAB-NEXT:    ret
   %1 = atomicrmw min ptr %a, i32 %b seq_cst
   ret i32 %1
 }
 
 define i32 @atomicrmw_umax_i32_seq_cst(ptr %a, i32 %b) nounwind {
-; RV32I-ZALRSC-LABEL: atomicrmw_umax_i32_seq_cst:
-; RV32I-ZALRSC:       # %bb.0:
-; RV32I-ZALRSC-NEXT:  .LBB2_1: # =>This Inner Loop Header: Depth=1
-; RV32I-ZALRSC-NEXT:    lr.w.aqrl a2, (a0)
-; RV32I-ZALRSC-NEXT:    mv a3, a2
-; RV32I-ZALRSC-NEXT:    bgeu a3, a1, .LBB2_3
-; RV32I-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB2_1 Depth=1
-; RV32I-ZALRSC-NEXT:    mv a3, a1
-; RV32I-ZALRSC-NEXT:  .LBB2_3: # in Loop: Header=BB2_1 Depth=1
-; RV32I-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
-; RV32I-ZALRSC-NEXT:    bnez a3, .LBB2_1
-; RV32I-ZALRSC-NEXT:  # %bb.4:
-; RV32I-ZALRSC-NEXT:    mv a0, a2
-; RV32I-ZALRSC-NEXT:    ret
-;
 ; RV32IB-ZALRSC-LABEL: atomicrmw_umax_i32_seq_cst:
 ; RV32IB-ZALRSC:       # %bb.0:
 ; RV32IB-ZALRSC-NEXT:  .LBB2_1: # =>This Inner Loop Header: Depth=1
 ; RV32IB-ZALRSC-NEXT:    lr.w.aqrl a2, (a0)
-; RV32IB-ZALRSC-NEXT:    maxu a3, a2, a1
+; RV32IB-ZALRSC-NEXT:    mv a3, a2
+; RV32IB-ZALRSC-NEXT:    bgeu a3, a1, .LBB2_3
+; RV32IB-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB2_1 Depth=1
+; RV32IB-ZALRSC-NEXT:    mv a3, a1
+; RV32IB-ZALRSC-NEXT:  .LBB2_3: # in Loop: Header=BB2_1 Depth=1
 ; RV32IB-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
 ; RV32IB-ZALRSC-NEXT:    bnez a3, .LBB2_1
-; RV32IB-ZALRSC-NEXT:  # %bb.2:
+; RV32IB-ZALRSC-NEXT:  # %bb.4:
 ; RV32IB-ZALRSC-NEXT:    mv a0, a2
 ; RV32IB-ZALRSC-NEXT:    ret
 ;
-; RV32IA-LABEL: atomicrmw_umax_i32_seq_cst:
-; RV32IA:       # %bb.0:
-; RV32IA-NEXT:    amomaxu.w.aqrl a0, a1, (a0)
-; RV32IA-NEXT:    ret
-;
-; RV64I-ZALRSC-LABEL: atomicrmw_umax_i32_seq_cst:
-; RV64I-ZALRSC:       # %bb.0:
-; RV64I-ZALRSC-NEXT:    sext.w a2, a1
-; RV64I-ZALRSC-NEXT:  .LBB2_1: # =>This Inner Loop Header: Depth=1
-; RV64I-ZALRSC-NEXT:    lr.w.aqrl a1, (a0)
-; RV64I-ZALRSC-NEXT:    mv a3, a1
-; RV64I-ZALRSC-NEXT:    bgeu a3, a2, .LBB2_3
-; RV64I-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB2_1 Depth=1
-; RV64I-ZALRSC-NEXT:    mv a3, a2
-; RV64I-ZALRSC-NEXT:  .LBB2_3: # in Loop: Header=BB2_1 Depth=1
-; RV64I-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
-; RV64I-ZALRSC-NEXT:    bnez a3, .LBB2_1
-; RV64I-ZALRSC-NEXT:  # %bb.4:
-; RV64I-ZALRSC-NEXT:    mv a0, a1
-; RV64I-ZALRSC-NEXT:    ret
+; RV32IB-ZALRSC-PERM-LABEL: atomicrmw_umax_i32_seq_cst:
+; RV32IB-ZALRSC-PERM:       # %bb.0:
+; RV32IB-ZALRSC-PERM-NEXT:  .LBB2_1: # =>This Inner Loop Header: Depth=1
+; RV32IB-ZALRSC-PERM-NEXT:    lr.w.aqrl a2, (a0)
+; RV32IB-ZALRSC-PERM-NEXT:    maxu a3, a2, a1
+; RV32IB-ZALRSC-PERM-NEXT:    sc.w.rl a3, a3, (a0)
+; RV32IB-ZALRSC-PERM-NEXT:    bnez a3, .LBB2_1
+; RV32IB-ZALRSC-PERM-NEXT:  # %bb.2:
+; RV32IB-ZALRSC-PERM-NEXT:    mv a0, a2
+; RV32IB-ZALRSC-PERM-NEXT:    ret
+;
+; RV32IAB-LABEL: atomicrmw_umax_i32_seq_cst:
+; RV32IAB:       # %bb.0:
+; RV32IAB-NEXT:    amomaxu.w.aqrl a0, a1, (a0)
+; RV32IAB-NEXT:    ret
 ;
 ; RV64IB-ZALRSC-LABEL: atomicrmw_umax_i32_seq_cst:
 ; RV64IB-ZALRSC:       # %bb.0:
 ; RV64IB-ZALRSC-NEXT:    sext.w a2, a1
 ; RV64IB-ZALRSC-NEXT:  .LBB2_1: # =>This Inner Loop Header: Depth=1
 ; RV64IB-ZALRSC-NEXT:    lr.w.aqrl a1, (a0)
-; RV64IB-ZALRSC-NEXT:    maxu a3, a1, a2
+; RV64IB-ZALRSC-NEXT:    mv a3, a1
+; RV64IB-ZALRSC-NEXT:    bgeu a3, a2, .LBB2_3
+; RV64IB-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB2_1 Depth=1
+; RV64IB-ZALRSC-NEXT:    mv a3, a2
+; RV64IB-ZALRSC-NEXT:  .LBB2_3: # in Loop: Header=BB2_1 Depth=1
 ; RV64IB-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
 ; RV64IB-ZALRSC-NEXT:    bnez a3, .LBB2_1
-; RV64IB-ZALRSC-NEXT:  # %bb.2:
+; RV64IB-ZALRSC-NEXT:  # %bb.4:
 ; RV64IB-ZALRSC-NEXT:    mv a0, a1
 ; RV64IB-ZALRSC-NEXT:    ret
 ;
-; RV64IA-LABEL: atomicrmw_umax_i32_seq_cst:
-; RV64IA:       # %bb.0:
-; RV64IA-NEXT:    amomaxu.w.aqrl a0, a1, (a0)
-; RV64IA-NEXT:    ret
+; RV64IB-ZALRSC-PERM-LABEL: atomicrmw_umax_i32_seq_cst:
+; RV64IB-ZALRSC-PERM:       # %bb.0:
+; RV64IB-ZALRSC-PERM-NEXT:    sext.w a2, a1
+; RV64IB-ZALRSC-PERM-NEXT:  .LBB2_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-PERM-NEXT:    lr.w.aqrl a1, (a0)
+; RV64IB-ZALRSC-PERM-NEXT:    maxu a3, a1, a2
+; RV64IB-ZALRSC-PERM-NEXT:    sc.w.rl a3, a3, (a0)
+; RV64IB-ZALRSC-PERM-NEXT:    bnez a3, .LBB2_1
+; RV64IB-ZALRSC-PERM-NEXT:  # %bb.2:
+; RV64IB-ZALRSC-PERM-NEXT:    mv a0, a1
+; RV64IB-ZALRSC-PERM-NEXT:    ret
+;
+; RV64IAB-LABEL: atomicrmw_umax_i32_seq_cst:
+; RV64IAB:       # %bb.0:
+; RV64IAB-NEXT:    amomaxu.w.aqrl a0, a1, (a0)
+; RV64IAB-NEXT:    ret
   %1 = atomicrmw umax ptr %a, i32 %b seq_cst
   ret i32 %1
 }
 
 define i32 @atomicrmw_umin_i32_seq_cst(ptr %a, i32 %b) nounwind {
-; RV32I-ZALRSC-LABEL: atomicrmw_umin_i32_seq_cst:
-; RV32I-ZALRSC:       # %bb.0:
-; RV32I-ZALRSC-NEXT:  .LBB3_1: # =>This Inner Loop Header: Depth=1
-; RV32I-ZALRSC-NEXT:    lr.w.aqrl a2, (a0)
-; RV32I-ZALRSC-NEXT:    mv a3, a2
-; RV32I-ZALRSC-NEXT:    bgeu a1, a3, .LBB3_3
-; RV32I-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB3_1 Depth=1
-; RV32I-ZALRSC-NEXT:    mv a3, a1
-; RV32I-ZALRSC-NEXT:  .LBB3_3: # in Loop: Header=BB3_1 Depth=1
-; RV32I-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
-; RV32I-ZALRSC-NEXT:    bnez a3, .LBB3_1
-; RV32I-ZALRSC-NEXT:  # %bb.4:
-; RV32I-ZALRSC-NEXT:    mv a0, a2
-; RV32I-ZALRSC-NEXT:    ret
-;
 ; RV32IB-ZALRSC-LABEL: atomicrmw_umin_i32_seq_cst:
 ; RV32IB-ZALRSC:       # %bb.0:
 ; RV32IB-ZALRSC-NEXT:  .LBB3_1: # =>This Inner Loop Header: Depth=1
 ; RV32IB-ZALRSC-NEXT:    lr.w.aqrl a2, (a0)
-; RV32IB-ZALRSC-NEXT:    minu a3, a2, a1
+; RV32IB-ZALRSC-NEXT:    mv a3, a2
+; RV32IB-ZALRSC-NEXT:    bgeu a1, a3, .LBB3_3
+; RV32IB-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB3_1 Depth=1
+; RV32IB-ZALRSC-NEXT:    mv a3, a1
+; RV32IB-ZALRSC-NEXT:  .LBB3_3: # in Loop: Header=BB3_1 Depth=1
 ; RV32IB-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
 ; RV32IB-ZALRSC-NEXT:    bnez a3, .LBB3_1
-; RV32IB-ZALRSC-NEXT:  # %bb.2:
+; RV32IB-ZALRSC-NEXT:  # %bb.4:
 ; RV32IB-ZALRSC-NEXT:    mv a0, a2
 ; RV32IB-ZALRSC-NEXT:    ret
 ;
-; RV32IA-LABEL: atomicrmw_umin_i32_seq_cst:
-; RV32IA:       # %bb.0:
-; RV32IA-NEXT:    amominu.w.aqrl a0, a1, (a0)
-; RV32IA-NEXT:    ret
-;
-; RV64I-ZALRSC-LABEL: atomicrmw_umin_i32_seq_cst:
-; RV64I-ZALRSC:       # %bb.0:
-; RV64I-ZALRSC-NEXT:    sext.w a2, a1
-; RV64I-ZALRSC-NEXT:  .LBB3_1: # =>This Inner Loop Header: Depth=1
-; RV64I-ZALRSC-NEXT:    lr.w.aqrl a1, (a0)
-; RV64I-ZALRSC-NEXT:    mv a3, a1
-; RV64I-ZALRSC-NEXT:    bgeu a2, a3, .LBB3_3
-; RV64I-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB3_1 Depth=1
-; RV64I-ZALRSC-NEXT:    mv a3, a2
-; RV64I-ZALRSC-NEXT:  .LBB3_3: # in Loop: Header=BB3_1 Depth=1
-; RV64I-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
-; RV64I-ZALRSC-NEXT:    bnez a3, .LBB3_1
-; RV64I-ZALRSC-NEXT:  # %bb.4:
-; RV64I-ZALRSC-NEXT:    mv a0, a1
-; RV64I-ZALRSC-NEXT:    ret
+; RV32IB-ZALRSC-PERM-LABEL: atomicrmw_umin_i32_seq_cst:
+; RV32IB-ZALRSC-PERM:       # %bb.0:
+; RV32IB-ZALRSC-PERM-NEXT:  .LBB3_1: # =>This Inner Loop Header: Depth=1
+; RV32IB-ZALRSC-PERM-NEXT:    lr.w.aqrl a2, (a0)
+; RV32IB-ZALRSC-PERM-NEXT:    minu a3, a2, a1
+; RV32IB-ZALRSC-PERM-NEXT:    sc.w.rl a3, a3, (a0)
+; RV32IB-ZALRSC-PERM-NEXT:    bnez a3, .LBB3_1
+; RV32IB-ZALRSC-PERM-NEXT:  # %bb.2:
+; RV32IB-ZALRSC-PERM-NEXT:    mv a0, a2
+; RV32IB-ZALRSC-PERM-NEXT:    ret
+;
+; RV32IAB-LABEL: atomicrmw_umin_i32_seq_cst:
+; RV32IAB:       # %bb.0:
+; RV32IAB-NEXT:    amominu.w.aqrl a0, a1, (a0)
+; RV32IAB-NEXT:    ret
 ;
 ; RV64IB-ZALRSC-LABEL: atomicrmw_umin_i32_seq_cst:
 ; RV64IB-ZALRSC:       # %bb.0:
 ; RV64IB-ZALRSC-NEXT:    sext.w a2, a1
 ; RV64IB-ZALRSC-NEXT:  .LBB3_1: # =>This Inner Loop Header: Depth=1
 ; RV64IB-ZALRSC-NEXT:    lr.w.aqrl a1, (a0)
-; RV64IB-ZALRSC-NEXT:    minu a3, a1, a2
+; RV64IB-ZALRSC-NEXT:    mv a3, a1
+; RV64IB-ZALRSC-NEXT:    bgeu a2, a3, .LBB3_3
+; RV64IB-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB3_1 Depth=1
+; RV64IB-ZALRSC-NEXT:    mv a3, a2
+; RV64IB-ZALRSC-NEXT:  .LBB3_3: # in Loop: Header=BB3_1 Depth=1
 ; RV64IB-ZALRSC-NEXT:    sc.w.rl a3, a3, (a0)
 ; RV64IB-ZALRSC-NEXT:    bnez a3, .LBB3_1
-; RV64IB-ZALRSC-NEXT:  # %bb.2:
+; RV64IB-ZALRSC-NEXT:  # %bb.4:
 ; RV64IB-ZALRSC-NEXT:    mv a0, a1
 ; RV64IB-ZALRSC-NEXT:    ret
 ;
-; RV64IA-LABEL: atomicrmw_umin_i32_seq_cst:
-; RV64IA:       # %bb.0:
-; RV64IA-NEXT:    amominu.w.aqrl a0, a1, (a0)
-; RV64IA-NEXT:    ret
+; RV64IB-ZALRSC-PERM-LABEL: atomicrmw_umin_i32_seq_cst:
+; RV64IB-ZALRSC-PERM:       # %bb.0:
+; RV64IB-ZALRSC-PERM-NEXT:    sext.w a2, a1
+; RV64IB-ZALRSC-PERM-NEXT:  .LBB3_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-PERM-NEXT:    lr.w.aqrl a1, (a0)
+; RV64IB-ZALRSC-PERM-NEXT:    minu a3, a1, a2
+; RV64IB-ZALRSC-PERM-NEXT:    sc.w.rl a3, a3, (a0)
+; RV64IB-ZALRSC-PERM-NEXT:    bnez a3, .LBB3_1
+; RV64IB-ZALRSC-PERM-NEXT:  # %bb.2:
+; RV64IB-ZALRSC-PERM-NEXT:    mv a0, a1
+; RV64IB-ZALRSC-PERM-NEXT:    ret
+;
+; RV64IAB-LABEL: atomicrmw_umin_i32_seq_cst:
+; RV64IAB:       # %bb.0:
+; RV64IAB-NEXT:    amominu.w.aqrl a0, a1, (a0)
+; RV64IAB-NEXT:    ret
   %1 = atomicrmw umin ptr %a, i32 %b seq_cst
   ret i32 %1
 }
 
 define i64 @atomicrmw_max_i64_seq_cst(ptr %a, i64 %b) nounwind {
-; RV32I-ZALRSC-LABEL: atomicrmw_max_i64_seq_cst:
-; RV32I-ZALRSC:       # %bb.0:
-; RV32I-ZALRSC-NEXT:    addi sp, sp, -32
-; RV32I-ZALRSC-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-ZALRSC-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-ZALRSC-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-ZALRSC-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-ZALRSC-NEXT:    mv s0, a2
-; RV32I-ZALRSC-NEXT:    mv s1, a0
-; RV32I-ZALRSC-NEXT:    lw a4, 0(a0)
-; RV32I-ZALRSC-NEXT:    lw a5, 4(a0)
-; RV32I-ZALRSC-NEXT:    mv s2, a1
-; RV32I-ZALRSC-NEXT:    j .LBB4_2
-; RV32I-ZALRSC-NEXT:  .LBB4_1: # %atomicrmw.start
-; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB4_2 Depth=1
-; RV32I-ZALRSC-NEXT:    sw a4, 8(sp)
-; RV32I-ZALRSC-NEXT:    sw a5, 12(sp)
-; RV32I-ZALRSC-NEXT:    addi a1, sp, 8
-; RV32I-ZALRSC-NEXT:    li a4, 5
-; RV32I-ZALRSC-NEXT:    li a5, 5
-; RV32I-ZALRSC-NEXT:    mv a0, s1
-; RV32I-ZALRSC-NEXT:    call __atomic_compare_exchange_8
-; RV32I-ZALRSC-NEXT:    lw a4, 8(sp)
-; RV32I-ZALRSC-NEXT:    lw a5, 12(sp)
-; RV32I-ZALRSC-NEXT:    bnez a0, .LBB4_7
-; RV32I-ZALRSC-NEXT:  .LBB4_2: # %atomicrmw.start
-; RV32I-ZALRSC-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32I-ZALRSC-NEXT:    beq a5, s0, .LBB4_4
-; RV32I-ZALRSC-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB4_2 Depth=1
-; RV32I-ZALRSC-NEXT:    slt a0, s0, a5
-; RV32I-ZALRSC-NEXT:    j .LBB4_5
-; RV32I-ZALRSC-NEXT:  .LBB4_4: # in Loop: Header=BB4_2 Depth=1
-; RV32I-ZALRSC-NEXT:    sltu a0, s2, a4
-; RV32I-ZALRSC-NEXT:  .LBB4_5: # %atomicrmw.start
-; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB4_2 Depth=1
-; RV32I-ZALRSC-NEXT:    mv a2, a4
-; RV32I-ZALRSC-NEXT:    mv a3, a5
-; RV32I-ZALRSC-NEXT:    bnez a0, .LBB4_1
-; RV32I-ZALRSC-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB4_2 Depth=1
-; RV32I-ZALRSC-NEXT:    mv a2, s2
-; RV32I-ZALRSC-NEXT:    mv a3, s0
-; RV32I-ZALRSC-NEXT:    j .LBB4_1
-; RV32I-ZALRSC-NEXT:  .LBB4_7: # %atomicrmw.end
-; RV32I-ZALRSC-NEXT:    mv a0, a4
-; RV32I-ZALRSC-NEXT:    mv a1, a5
-; RV32I-ZALRSC-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-ZALRSC-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-ZALRSC-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-ZALRSC-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-ZALRSC-NEXT:    addi sp, sp, 32
-; RV32I-ZALRSC-NEXT:    ret
-;
-; RV32IB-ZALRSC-LABEL: atomicrmw_max_i64_seq_cst:
-; RV32IB-ZALRSC:       # %bb.0:
-; RV32IB-ZALRSC-NEXT:    addi sp, sp, -32
-; RV32IB-ZALRSC-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IB-ZALRSC-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IB-ZALRSC-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IB-ZALRSC-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IB-ZALRSC-NEXT:    mv s0, a2
-; RV32IB-ZALRSC-NEXT:    mv s1, a0
-; RV32IB-ZALRSC-NEXT:    lw a4, 0(a0)
-; RV32IB-ZALRSC-NEXT:    lw a5, 4(a0)
-; RV32IB-ZALRSC-NEXT:    mv s2, a1
-; RV32IB-ZALRSC-NEXT:    j .LBB4_2
-; RV32IB-ZALRSC-NEXT:  .LBB4_1: # %atomicrmw.start
-; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB4_2 Depth=1
-; RV32IB-ZALRSC-NEXT:    sw a4, 8(sp)
-; RV32IB-ZALRSC-NEXT:    sw a5, 12(sp)
-; RV32IB-ZALRSC-NEXT:    addi a1, sp, 8
-; RV32IB-ZALRSC-NEXT:    li a4, 5
-; RV32IB-ZALRSC-NEXT:    li a5, 5
-; RV32IB-ZALRSC-NEXT:    mv a0, s1
-; RV32IB-ZALRSC-NEXT:    call __atomic_compare_exchange_8
-; RV32IB-ZALRSC-NEXT:    lw a4, 8(sp)
-; RV32IB-ZALRSC-NEXT:    lw a5, 12(sp)
-; RV32IB-ZALRSC-NEXT:    bnez a0, .LBB4_7
-; RV32IB-ZALRSC-NEXT:  .LBB4_2: # %atomicrmw.start
-; RV32IB-ZALRSC-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32IB-ZALRSC-NEXT:    beq a5, s0, .LBB4_4
-; RV32IB-ZALRSC-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB4_2 Depth=1
-; RV32IB-ZALRSC-NEXT:    slt a0, s0, a5
-; RV32IB-ZALRSC-NEXT:    j .LBB4_5
-; RV32IB-ZALRSC-NEXT:  .LBB4_4: # in Loop: Header=BB4_2 Depth=1
-; RV32IB-ZALRSC-NEXT:    sltu a0, s2, a4
-; RV32IB-ZALRSC-NEXT:  .LBB4_5: # %atomicrmw.start
-; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB4_2 Depth=1
-; RV32IB-ZALRSC-NEXT:    mv a2, a4
-; RV32IB-ZALRSC-NEXT:    mv a3, a5
-; RV32IB-ZALRSC-NEXT:    bnez a0, .LBB4_1
-; RV32IB-ZALRSC-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB4_2 Depth=1
-; RV32IB-ZALRSC-NEXT:    mv a2, s2
-; RV32IB-ZALRSC-NEXT:    mv a3, s0
-; RV32IB-ZALRSC-NEXT:    j .LBB4_1
-; RV32IB-ZALRSC-NEXT:  .LBB4_7: # %atomicrmw.end
-; RV32IB-ZALRSC-NEXT:    mv a0, a4
-; RV32IB-ZALRSC-NEXT:    mv a1, a5
-; RV32IB-ZALRSC-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IB-ZALRSC-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IB-ZALRSC-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IB-ZALRSC-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IB-ZALRSC-NEXT:    addi sp, sp, 32
-; RV32IB-ZALRSC-NEXT:    ret
-;
-; RV32IA-LABEL: atomicrmw_max_i64_seq_cst:
-; RV32IA:       # %bb.0:
-; RV32IA-NEXT:    addi sp, sp, -32
-; RV32IA-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    mv s0, a2
-; RV32IA-NEXT:    mv s1, a0
-; RV32IA-NEXT:    lw a4, 0(a0)
-; RV32IA-NEXT:    lw a5, 4(a0)
-; RV32IA-NEXT:    mv s2, a1
-; RV32IA-NEXT:    j .LBB4_2
-; RV32IA-NEXT:  .LBB4_1: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB4_2 Depth=1
-; RV32IA-NEXT:    sw a4, 8(sp)
-; RV32IA-NEXT:    sw a5, 12(sp)
-; RV32IA-NEXT:    addi a1, sp, 8
-; RV32IA-NEXT:    li a4, 5
-; RV32IA-NEXT:    li a5, 5
-; RV32IA-NEXT:    mv a0, s1
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
-; RV32IA-NEXT:    lw a4, 8(sp)
-; RV32IA-NEXT:    lw a5, 12(sp)
-; RV32IA-NEXT:    bnez a0, .LBB4_7
-; RV32IA-NEXT:  .LBB4_2: # %atomicrmw.start
-; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    beq a5, s0, .LBB4_4
-; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB4_2 Depth=1
-; RV32IA-NEXT:    slt a0, s0, a5
-; RV32IA-NEXT:    j .LBB4_5
-; RV32IA-NEXT:  .LBB4_4: # in Loop: Header=BB4_2 Depth=1
-; RV32IA-NEXT:    sltu a0, s2, a4
-; RV32IA-NEXT:  .LBB4_5: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB4_2 Depth=1
-; RV32IA-NEXT:    mv a2, a4
-; RV32IA-NEXT:    mv a3, a5
-; RV32IA-NEXT:    bnez a0, .LBB4_1
-; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB4_2 Depth=1
-; RV32IA-NEXT:    mv a2, s2
-; RV32IA-NEXT:    mv a3, s0
-; RV32IA-NEXT:    j .LBB4_1
-; RV32IA-NEXT:  .LBB4_7: # %atomicrmw.end
-; RV32IA-NEXT:    mv a0, a4
-; RV32IA-NEXT:    mv a1, a5
-; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IA-NEXT:    addi sp, sp, 32
-; RV32IA-NEXT:    ret
-;
-; RV64I-ZALRSC-LABEL: atomicrmw_max_i64_seq_cst:
-; RV64I-ZALRSC:       # %bb.0:
-; RV64I-ZALRSC-NEXT:  .LBB4_1: # =>This Inner Loop Header: Depth=1
-; RV64I-ZALRSC-NEXT:    lr.d.aqrl a2, (a0)
-; RV64I-ZALRSC-NEXT:    mv a3, a2
-; RV64I-ZALRSC-NEXT:    bge a3, a1, .LBB4_3
-; RV64I-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB4_1 Depth=1
-; RV64I-ZALRSC-NEXT:    mv a3, a1
-; RV64I-ZALRSC-NEXT:  .LBB4_3: # in Loop: Header=BB4_1 Depth=1
-; RV64I-ZALRSC-NEXT:    sc.d.rl a3, a3, (a0)
-; RV64I-ZALRSC-NEXT:    bnez a3, .LBB4_1
-; RV64I-ZALRSC-NEXT:  # %bb.4:
-; RV64I-ZALRSC-NEXT:    mv a0, a2
-; RV64I-ZALRSC-NEXT:    ret
+; RV32IB-COMMON-LABEL: atomicrmw_max_i64_seq_cst:
+; RV32IB-COMMON:       # %bb.0:
+; RV32IB-COMMON-NEXT:    addi sp, sp, -32
+; RV32IB-COMMON-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT:    mv s0, a2
+; RV32IB-COMMON-NEXT:    mv s1, a0
+; RV32IB-COMMON-NEXT:    lw a4, 0(a0)
+; RV32IB-COMMON-NEXT:    lw a5, 4(a0)
+; RV32IB-COMMON-NEXT:    mv s2, a1
+; RV32IB-COMMON-NEXT:    j .LBB4_2
+; RV32IB-COMMON-NEXT:  .LBB4_1: # %atomicrmw.start
+; RV32IB-COMMON-NEXT:    # in Loop: Header=BB4_2 Depth=1
+; RV32IB-COMMON-NEXT:    sw a4, 8(sp)
+; RV32IB-COMMON-NEXT:    sw a5, 12(sp)
+; RV32IB-COMMON-NEXT:    addi a1, sp, 8
+; RV32IB-COMMON-NEXT:    li a4, 5
+; RV32IB-COMMON-NEXT:    li a5, 5
+; RV32IB-COMMON-NEXT:    mv a0, s1
+; RV32IB-COMMON-NEXT:    call __atomic_compare_exchange_8
+; RV32IB-COMMON-NEXT:    lw a4, 8(sp)
+; RV32IB-COMMON-NEXT:    lw a5, 12(sp)
+; RV32IB-COMMON-NEXT:    bnez a0, .LBB4_7
+; RV32IB-COMMON-NEXT:  .LBB4_2: # %atomicrmw.start
+; RV32IB-COMMON-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32IB-COMMON-NEXT:    beq a5, s0, .LBB4_4
+; RV32IB-COMMON-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IB-COMMON-NEXT:    # in Loop: Header=BB4_2 Depth=1
+; RV32IB-COMMON-NEXT:    slt a0, s0, a5
+; RV32IB-COMMON-NEXT:    j .LBB4_5
+; RV32IB-COMMON-NEXT:  .LBB4_4: # in Loop: Header=BB4_2 Depth=1
+; RV32IB-COMMON-NEXT:    sltu a0, s2, a4
+; RV32IB-COMMON-NEXT:  .LBB4_5: # %atomicrmw.start
+; RV32IB-COMMON-NEXT:    # in Loop: Header=BB4_2 Depth=1
+; RV32IB-COMMON-NEXT:    mv a2, a4
+; RV32IB-COMMON-NEXT:    mv a3, a5
+; RV32IB-COMMON-NEXT:    bnez a0, .LBB4_1
+; RV32IB-COMMON-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IB-COMMON-NEXT:    # in Loop: Header=BB4_2 Depth=1
+; RV32IB-COMMON-NEXT:    mv a2, s2
+; RV32IB-COMMON-NEXT:    mv a3, s0
+; RV32IB-COMMON-NEXT:    j .LBB4_1
+; RV32IB-COMMON-NEXT:  .LBB4_7: # %atomicrmw.end
+; RV32IB-COMMON-NEXT:    mv a0, a4
+; RV32IB-COMMON-NEXT:    mv a1, a5
+; RV32IB-COMMON-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT:    addi sp, sp, 32
+; RV32IB-COMMON-NEXT:    ret
 ;
 ; RV64IB-ZALRSC-LABEL: atomicrmw_max_i64_seq_cst:
 ; RV64IB-ZALRSC:       # %bb.0:
 ; RV64IB-ZALRSC-NEXT:  .LBB4_1: # =>This Inner Loop Header: Depth=1
 ; RV64IB-ZALRSC-NEXT:    lr.d.aqrl a2, (a0)
-; RV64IB-ZALRSC-NEXT:    max a3, a2, a1
+; RV64IB-ZALRSC-NEXT:    mv a3, a2
+; RV64IB-ZALRSC-NEXT:    bge a3, a1, .LBB4_3
+; RV64IB-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB4_1 Depth=1
+; RV64IB-ZALRSC-NEXT:    mv a3, a1
+; RV64IB-ZALRSC-NEXT:  .LBB4_3: # in Loop: Header=BB4_1 Depth=1
 ; RV64IB-ZALRSC-NEXT:    sc.d.rl a3, a3, (a0)
 ; RV64IB-ZALRSC-NEXT:    bnez a3, .LBB4_1
-; RV64IB-ZALRSC-NEXT:  # %bb.2:
+; RV64IB-ZALRSC-NEXT:  # %bb.4:
 ; RV64IB-ZALRSC-NEXT:    mv a0, a2
 ; RV64IB-ZALRSC-NEXT:    ret
 ;
-; RV64IA-LABEL: atomicrmw_max_i64_seq_cst:
-; RV64IA:       # %bb.0:
-; RV64IA-NEXT:    amomax.d.aqrl a0, a1, (a0)
-; RV64IA-NEXT:    ret
+; RV64IB-ZALRSC-PERM-LABEL: atomicrmw_max_i64_seq_cst:
+; RV64IB-ZALRSC-PERM:       # %bb.0:
+; RV64IB-ZALRSC-PERM-NEXT:  .LBB4_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-PERM-NEXT:    lr.d.aqrl a2, (a0)
+; RV64IB-ZALRSC-PERM-NEXT:    max a3, a2, a1
+; RV64IB-ZALRSC-PERM-NEXT:    sc.d.rl a3, a3, (a0)
+; RV64IB-ZALRSC-PERM-NEXT:    bnez a3, .LBB4_1
+; RV64IB-ZALRSC-PERM-NEXT:  # %bb.2:
+; RV64IB-ZALRSC-PERM-NEXT:    mv a0, a2
+; RV64IB-ZALRSC-PERM-NEXT:    ret
+;
+; RV64IAB-LABEL: atomicrmw_max_i64_seq_cst:
+; RV64IAB:       # %bb.0:
+; RV64IAB-NEXT:    amomax.d.aqrl a0, a1, (a0)
+; RV64IAB-NEXT:    ret
   %1 = atomicrmw max ptr %a, i64 %b seq_cst
   ret i64 %1
 }
 
 define i64 @atomicrmw_min_i64_seq_cst(ptr %a, i64 %b) nounwind {
-; RV32I-ZALRSC-LABEL: atomicrmw_min_i64_seq_cst:
-; RV32I-ZALRSC:       # %bb.0:
-; RV32I-ZALRSC-NEXT:    addi sp, sp, -32
-; RV32I-ZALRSC-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-ZALRSC-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-ZALRSC-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-ZALRSC-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-ZALRSC-NEXT:    mv s0, a2
-; RV32I-ZALRSC-NEXT:    mv s1, a0
-; RV32I-ZALRSC-NEXT:    lw a4, 0(a0)
-; RV32I-ZALRSC-NEXT:    lw a5, 4(a0)
-; RV32I-ZALRSC-NEXT:    mv s2, a1
-; RV32I-ZALRSC-NEXT:    j .LBB5_2
-; RV32I-ZALRSC-NEXT:  .LBB5_1: # %atomicrmw.start
-; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB5_2 Depth=1
-; RV32I-ZALRSC-NEXT:    sw a4, 8(sp)
-; RV32I-ZALRSC-NEXT:    sw a5, 12(sp)
-; RV32I-ZALRSC-NEXT:    addi a1, sp, 8
-; RV32I-ZALRSC-NEXT:    li a4, 5
-; RV32I-ZALRSC-NEXT:    li a5, 5
-; RV32I-ZALRSC-NEXT:    mv a0, s1
-; RV32I-ZALRSC-NEXT:    call __atomic_compare_exchange_8
-; RV32I-ZALRSC-NEXT:    lw a4, 8(sp)
-; RV32I-ZALRSC-NEXT:    lw a5, 12(sp)
-; RV32I-ZALRSC-NEXT:    bnez a0, .LBB5_7
-; RV32I-ZALRSC-NEXT:  .LBB5_2: # %atomicrmw.start
-; RV32I-ZALRSC-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32I-ZALRSC-NEXT:    beq a5, s0, .LBB5_4
-; RV32I-ZALRSC-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB5_2 Depth=1
-; RV32I-ZALRSC-NEXT:    slt a0, s0, a5
-; RV32I-ZALRSC-NEXT:    j .LBB5_5
-; RV32I-ZALRSC-NEXT:  .LBB5_4: # in Loop: Header=BB5_2 Depth=1
-; RV32I-ZALRSC-NEXT:    sltu a0, s2, a4
-; RV32I-ZALRSC-NEXT:  .LBB5_5: # %atomicrmw.start
-; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB5_2 Depth=1
-; RV32I-ZALRSC-NEXT:    mv a2, a4
-; RV32I-ZALRSC-NEXT:    mv a3, a5
-; RV32I-ZALRSC-NEXT:    beqz a0, .LBB5_1
-; RV32I-ZALRSC-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB5_2 Depth=1
-; RV32I-ZALRSC-NEXT:    mv a2, s2
-; RV32I-ZALRSC-NEXT:    mv a3, s0
-; RV32I-ZALRSC-NEXT:    j .LBB5_1
-; RV32I-ZALRSC-NEXT:  .LBB5_7: # %atomicrmw.end
-; RV32I-ZALRSC-NEXT:    mv a0, a4
-; RV32I-ZALRSC-NEXT:    mv a1, a5
-; RV32I-ZALRSC-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-ZALRSC-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-ZALRSC-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-ZALRSC-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-ZALRSC-NEXT:    addi sp, sp, 32
-; RV32I-ZALRSC-NEXT:    ret
-;
-; RV32IB-ZALRSC-LABEL: atomicrmw_min_i64_seq_cst:
-; RV32IB-ZALRSC:       # %bb.0:
-; RV32IB-ZALRSC-NEXT:    addi sp, sp, -32
-; RV32IB-ZALRSC-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IB-ZALRSC-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IB-ZALRSC-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IB-ZALRSC-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IB-ZALRSC-NEXT:    mv s0, a2
-; RV32IB-ZALRSC-NEXT:    mv s1, a0
-; RV32IB-ZALRSC-NEXT:    lw a4, 0(a0)
-; RV32IB-ZALRSC-NEXT:    lw a5, 4(a0)
-; RV32IB-ZALRSC-NEXT:    mv s2, a1
-; RV32IB-ZALRSC-NEXT:    j .LBB5_2
-; RV32IB-ZALRSC-NEXT:  .LBB5_1: # %atomicrmw.start
-; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB5_2 Depth=1
-; RV32IB-ZALRSC-NEXT:    sw a4, 8(sp)
-; RV32IB-ZALRSC-NEXT:    sw a5, 12(sp)
-; RV32IB-ZALRSC-NEXT:    addi a1, sp, 8
-; RV32IB-ZALRSC-NEXT:    li a4, 5
-; RV32IB-ZALRSC-NEXT:    li a5, 5
-; RV32IB-ZALRSC-NEXT:    mv a0, s1
-; RV32IB-ZALRSC-NEXT:    call __atomic_compare_exchange_8
-; RV32IB-ZALRSC-NEXT:    lw a4, 8(sp)
-; RV32IB-ZALRSC-NEXT:    lw a5, 12(sp)
-; RV32IB-ZALRSC-NEXT:    bnez a0, .LBB5_7
-; RV32IB-ZALRSC-NEXT:  .LBB5_2: # %atomicrmw.start
-; RV32IB-ZALRSC-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32IB-ZALRSC-NEXT:    beq a5, s0, .LBB5_4
-; RV32IB-ZALRSC-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB5_2 Depth=1
-; RV32IB-ZALRSC-NEXT:    slt a0, a5, s0
-; RV32IB-ZALRSC-NEXT:    j .LBB5_5
-; RV32IB-ZALRSC-NEXT:  .LBB5_4: # in Loop: Header=BB5_2 Depth=1
-; RV32IB-ZALRSC-NEXT:    sltu a0, a4, s2
-; RV32IB-ZALRSC-NEXT:  .LBB5_5: # %atomicrmw.start
-; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB5_2 Depth=1
-; RV32IB-ZALRSC-NEXT:    mv a2, a4
-; RV32IB-ZALRSC-NEXT:    mv a3, a5
-; RV32IB-ZALRSC-NEXT:    bnez a0, .LBB5_1
-; RV32IB-ZALRSC-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB5_2 Depth=1
-; RV32IB-ZALRSC-NEXT:    mv a2, s2
-; RV32IB-ZALRSC-NEXT:    mv a3, s0
-; RV32IB-ZALRSC-NEXT:    j .LBB5_1
-; RV32IB-ZALRSC-NEXT:  .LBB5_7: # %atomicrmw.end
-; RV32IB-ZALRSC-NEXT:    mv a0, a4
-; RV32IB-ZALRSC-NEXT:    mv a1, a5
-; RV32IB-ZALRSC-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IB-ZALRSC-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IB-ZALRSC-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IB-ZALRSC-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IB-ZALRSC-NEXT:    addi sp, sp, 32
-; RV32IB-ZALRSC-NEXT:    ret
-;
-; RV32IA-LABEL: atomicrmw_min_i64_seq_cst:
-; RV32IA:       # %bb.0:
-; RV32IA-NEXT:    addi sp, sp, -32
-; RV32IA-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    mv s0, a2
-; RV32IA-NEXT:    mv s1, a0
-; RV32IA-NEXT:    lw a4, 0(a0)
-; RV32IA-NEXT:    lw a5, 4(a0)
-; RV32IA-NEXT:    mv s2, a1
-; RV32IA-NEXT:    j .LBB5_2
-; RV32IA-NEXT:  .LBB5_1: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB5_2 Depth=1
-; RV32IA-NEXT:    sw a4, 8(sp)
-; RV32IA-NEXT:    sw a5, 12(sp)
-; RV32IA-NEXT:    addi a1, sp, 8
-; RV32IA-NEXT:    li a4, 5
-; RV32IA-NEXT:    li a5, 5
-; RV32IA-NEXT:    mv a0, s1
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
-; RV32IA-NEXT:    lw a4, 8(sp)
-; RV32IA-NEXT:    lw a5, 12(sp)
-; RV32IA-NEXT:    bnez a0, .LBB5_7
-; RV32IA-NEXT:  .LBB5_2: # %atomicrmw.start
-; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    beq a5, s0, .LBB5_4
-; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB5_2 Depth=1
-; RV32IA-NEXT:    slt a0, s0, a5
-; RV32IA-NEXT:    j .LBB5_5
-; RV32IA-NEXT:  .LBB5_4: # in Loop: Header=BB5_2 Depth=1
-; RV32IA-NEXT:    sltu a0, s2, a4
-; RV32IA-NEXT:  .LBB5_5: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB5_2 Depth=1
-; RV32IA-NEXT:    mv a2, a4
-; RV32IA-NEXT:    mv a3, a5
-; RV32IA-NEXT:    beqz a0, .LBB5_1
-; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB5_2 Depth=1
-; RV32IA-NEXT:    mv a2, s2
-; RV32IA-NEXT:    mv a3, s0
-; RV32IA-NEXT:    j .LBB5_1
-; RV32IA-NEXT:  .LBB5_7: # %atomicrmw.end
-; RV32IA-NEXT:    mv a0, a4
-; RV32IA-NEXT:    mv a1, a5
-; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IA-NEXT:    addi sp, sp, 32
-; RV32IA-NEXT:    ret
-;
-; RV64I-ZALRSC-LABEL: atomicrmw_min_i64_seq_cst:
-; RV64I-ZALRSC:       # %bb.0:
-; RV64I-ZALRSC-NEXT:  .LBB5_1: # =>This Inner Loop Header: Depth=1
-; RV64I-ZALRSC-NEXT:    lr.d.aqrl a2, (a0)
-; RV64I-ZALRSC-NEXT:    mv a3, a2
-; RV64I-ZALRSC-NEXT:    bge a1, a3, .LBB5_3
-; RV64I-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB5_1 Depth=1
-; RV64I-ZALRSC-NEXT:    mv a3, a1
-; RV64I-ZALRSC-NEXT:  .LBB5_3: # in Loop: Header=BB5_1 Depth=1
-; RV64I-ZALRSC-NEXT:    sc.d.rl a3, a3, (a0)
-; RV64I-ZALRSC-NEXT:    bnez a3, .LBB5_1
-; RV64I-ZALRSC-NEXT:  # %bb.4:
-; RV64I-ZALRSC-NEXT:    mv a0, a2
-; RV64I-ZALRSC-NEXT:    ret
+; RV32IB-COMMON-LABEL: atomicrmw_min_i64_seq_cst:
+; RV32IB-COMMON:       # %bb.0:
+; RV32IB-COMMON-NEXT:    addi sp, sp, -32
+; RV32IB-COMMON-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT:    mv s0, a2
+; RV32IB-COMMON-NEXT:    mv s1, a0
+; RV32IB-COMMON-NEXT:    lw a4, 0(a0)
+; RV32IB-COMMON-NEXT:    lw a5, 4(a0)
+; RV32IB-COMMON-NEXT:    mv s2, a1
+; RV32IB-COMMON-NEXT:    j .LBB5_2
+; RV32IB-COMMON-NEXT:  .LBB5_1: # %atomicrmw.start
+; RV32IB-COMMON-NEXT:    # in Loop: Header=BB5_2 Depth=1
+; RV32IB-COMMON-NEXT:    sw a4, 8(sp)
+; RV32IB-COMMON-NEXT:    sw a5, 12(sp)
+; RV32IB-COMMON-NEXT:    addi a1, sp, 8
+; RV32IB-COMMON-NEXT:    li a4, 5
+; RV32IB-COMMON-NEXT:    li a5, 5
+; RV32IB-COMMON-NEXT:    mv a0, s1
+; RV32IB-COMMON-NEXT:    call __atomic_compare_exchange_8
+; RV32IB-COMMON-NEXT:    lw a4, 8(sp)
+; RV32IB-COMMON-NEXT:    lw a5, 12(sp)
+; RV32IB-COMMON-NEXT:    bnez a0, .LBB5_7
+; RV32IB-COMMON-NEXT:  .LBB5_2: # %atomicrmw.start
+; RV32IB-COMMON-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32IB-COMMON-NEXT:    beq a5, s0, .LBB5_4
+; RV32IB-COMMON-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IB-COMMON-NEXT:    # in Loop: Header=BB5_2 Depth=1
+; RV32IB-COMMON-NEXT:    slt a0, a5, s0
+; RV32IB-COMMON-NEXT:    j .LBB5_5
+; RV32IB-COMMON-NEXT:  .LBB5_4: # in Loop: Header=BB5_2 Depth=1
+; RV32IB-COMMON-NEXT:    sltu a0, a4, s2
+; RV32IB-COMMON-NEXT:  .LBB5_5: # %atomicrmw.start
+; RV32IB-COMMON-NEXT:    # in Loop: Header=BB5_2 Depth=1
+; RV32IB-COMMON-NEXT:    mv a2, a4
+; RV32IB-COMMON-NEXT:    mv a3, a5
+; RV32IB-COMMON-NEXT:    bnez a0, .LBB5_1
+; RV32IB-COMMON-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IB-COMMON-NEXT:    # in Loop: Header=BB5_2 Depth=1
+; RV32IB-COMMON-NEXT:    mv a2, s2
+; RV32IB-COMMON-NEXT:    mv a3, s0
+; RV32IB-COMMON-NEXT:    j .LBB5_1
+; RV32IB-COMMON-NEXT:  .LBB5_7: # %atomicrmw.end
+; RV32IB-COMMON-NEXT:    mv a0, a4
+; RV32IB-COMMON-NEXT:    mv a1, a5
+; RV32IB-COMMON-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT:    addi sp, sp, 32
+; RV32IB-COMMON-NEXT:    ret
 ;
 ; RV64IB-ZALRSC-LABEL: atomicrmw_min_i64_seq_cst:
 ; RV64IB-ZALRSC:       # %bb.0:
 ; RV64IB-ZALRSC-NEXT:  .LBB5_1: # =>This Inner Loop Header: Depth=1
 ; RV64IB-ZALRSC-NEXT:    lr.d.aqrl a2, (a0)
-; RV64IB-ZALRSC-NEXT:    min a3, a2, a1
+; RV64IB-ZALRSC-NEXT:    mv a3, a2
+; RV64IB-ZALRSC-NEXT:    bge a1, a3, .LBB5_3
+; RV64IB-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB5_1 Depth=1
+; RV64IB-ZALRSC-NEXT:    mv a3, a1
+; RV64IB-ZALRSC-NEXT:  .LBB5_3: # in Loop: Header=BB5_1 Depth=1
 ; RV64IB-ZALRSC-NEXT:    sc.d.rl a3, a3, (a0)
 ; RV64IB-ZALRSC-NEXT:    bnez a3, .LBB5_1
-; RV64IB-ZALRSC-NEXT:  # %bb.2:
+; RV64IB-ZALRSC-NEXT:  # %bb.4:
 ; RV64IB-ZALRSC-NEXT:    mv a0, a2
 ; RV64IB-ZALRSC-NEXT:    ret
 ;
-; RV64IA-LABEL: atomicrmw_min_i64_seq_cst:
-; RV64IA:       # %bb.0:
-; RV64IA-NEXT:    amomin.d.aqrl a0, a1, (a0)
-; RV64IA-NEXT:    ret
+; RV64IB-ZALRSC-PERM-LABEL: atomicrmw_min_i64_seq_cst:
+; RV64IB-ZALRSC-PERM:       # %bb.0:
+; RV64IB-ZALRSC-PERM-NEXT:  .LBB5_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-PERM-NEXT:    lr.d.aqrl a2, (a0)
+; RV64IB-ZALRSC-PERM-NEXT:    min a3, a2, a1
+; RV64IB-ZALRSC-PERM-NEXT:    sc.d.rl a3, a3, (a0)
+; RV64IB-ZALRSC-PERM-NEXT:    bnez a3, .LBB5_1
+; RV64IB-ZALRSC-PERM-NEXT:  # %bb.2:
+; RV64IB-ZALRSC-PERM-NEXT:    mv a0, a2
+; RV64IB-ZALRSC-PERM-NEXT:    ret
+;
+; RV64IAB-LABEL: atomicrmw_min_i64_seq_cst:
+; RV64IAB:       # %bb.0:
+; RV64IAB-NEXT:    amomin.d.aqrl a0, a1, (a0)
+; RV64IAB-NEXT:    ret
   %1 = atomicrmw min ptr %a, i64 %b seq_cst
   ret i64 %1
 }
 
 define i64 @atomicrmw_umax_i64_seq_cst(ptr %a, i64 %b) nounwind {
-; RV32I-ZALRSC-LABEL: atomicrmw_umax_i64_seq_cst:
-; RV32I-ZALRSC:       # %bb.0:
-; RV32I-ZALRSC-NEXT:    addi sp, sp, -32
-; RV32I-ZALRSC-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-ZALRSC-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-ZALRSC-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-ZALRSC-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-ZALRSC-NEXT:    mv s0, a2
-; RV32I-ZALRSC-NEXT:    mv s1, a0
-; RV32I-ZALRSC-NEXT:    lw a4, 0(a0)
-; RV32I-ZALRSC-NEXT:    lw a5, 4(a0)
-; RV32I-ZALRSC-NEXT:    mv s2, a1
-; RV32I-ZALRSC-NEXT:    j .LBB6_2
-; RV32I-ZALRSC-NEXT:  .LBB6_1: # %atomicrmw.start
-; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB6_2 Depth=1
-; RV32I-ZALRSC-NEXT:    sw a4, 8(sp)
-; RV32I-ZALRSC-NEXT:    sw a5, 12(sp)
-; RV32I-ZALRSC-NEXT:    addi a1, sp, 8
-; RV32I-ZALRSC-NEXT:    li a4, 5
-; RV32I-ZALRSC-NEXT:    li a5, 5
-; RV32I-ZALRSC-NEXT:    mv a0, s1
-; RV32I-ZALRSC-NEXT:    call __atomic_compare_exchange_8
-; RV32I-ZALRSC-NEXT:    lw a4, 8(sp)
-; RV32I-ZALRSC-NEXT:    lw a5, 12(sp)
-; RV32I-ZALRSC-NEXT:    bnez a0, .LBB6_7
-; RV32I-ZALRSC-NEXT:  .LBB6_2: # %atomicrmw.start
-; RV32I-ZALRSC-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32I-ZALRSC-NEXT:    beq a5, s0, .LBB6_4
-; RV32I-ZALRSC-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB6_2 Depth=1
-; RV32I-ZALRSC-NEXT:    sltu a0, s0, a5
-; RV32I-ZALRSC-NEXT:    j .LBB6_5
-; RV32I-ZALRSC-NEXT:  .LBB6_4: # in Loop: Header=BB6_2 Depth=1
-; RV32I-ZALRSC-NEXT:    sltu a0, s2, a4
-; RV32I-ZALRSC-NEXT:  .LBB6_5: # %atomicrmw.start
-; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB6_2 Depth=1
-; RV32I-ZALRSC-NEXT:    mv a2, a4
-; RV32I-ZALRSC-NEXT:    mv a3, a5
-; RV32I-ZALRSC-NEXT:    bnez a0, .LBB6_1
-; RV32I-ZALRSC-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB6_2 Depth=1
-; RV32I-ZALRSC-NEXT:    mv a2, s2
-; RV32I-ZALRSC-NEXT:    mv a3, s0
-; RV32I-ZALRSC-NEXT:    j .LBB6_1
-; RV32I-ZALRSC-NEXT:  .LBB6_7: # %atomicrmw.end
-; RV32I-ZALRSC-NEXT:    mv a0, a4
-; RV32I-ZALRSC-NEXT:    mv a1, a5
-; RV32I-ZALRSC-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-ZALRSC-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-ZALRSC-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-ZALRSC-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-ZALRSC-NEXT:    addi sp, sp, 32
-; RV32I-ZALRSC-NEXT:    ret
-;
-; RV32IB-ZALRSC-LABEL: atomicrmw_umax_i64_seq_cst:
-; RV32IB-ZALRSC:       # %bb.0:
-; RV32IB-ZALRSC-NEXT:    addi sp, sp, -32
-; RV32IB-ZALRSC-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IB-ZALRSC-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IB-ZALRSC-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IB-ZALRSC-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IB-ZALRSC-NEXT:    mv s0, a2
-; RV32IB-ZALRSC-NEXT:    mv s1, a0
-; RV32IB-ZALRSC-NEXT:    lw a4, 0(a0)
-; RV32IB-ZALRSC-NEXT:    lw a5, 4(a0)
-; RV32IB-ZALRSC-NEXT:    mv s2, a1
-; RV32IB-ZALRSC-NEXT:    j .LBB6_2
-; RV32IB-ZALRSC-NEXT:  .LBB6_1: # %atomicrmw.start
-; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB6_2 Depth=1
-; RV32IB-ZALRSC-NEXT:    sw a4, 8(sp)
-; RV32IB-ZALRSC-NEXT:    sw a5, 12(sp)
-; RV32IB-ZALRSC-NEXT:    addi a1, sp, 8
-; RV32IB-ZALRSC-NEXT:    li a4, 5
-; RV32IB-ZALRSC-NEXT:    li a5, 5
-; RV32IB-ZALRSC-NEXT:    mv a0, s1
-; RV32IB-ZALRSC-NEXT:    call __atomic_compare_exchange_8
-; RV32IB-ZALRSC-NEXT:    lw a4, 8(sp)
-; RV32IB-ZALRSC-NEXT:    lw a5, 12(sp)
-; RV32IB-ZALRSC-NEXT:    bnez a0, .LBB6_7
-; RV32IB-ZALRSC-NEXT:  .LBB6_2: # %atomicrmw.start
-; RV32IB-ZALRSC-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32IB-ZALRSC-NEXT:    beq a5, s0, .LBB6_4
-; RV32IB-ZALRSC-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB6_2 Depth=1
-; RV32IB-ZALRSC-NEXT:    sltu a0, s0, a5
-; RV32IB-ZALRSC-NEXT:    j .LBB6_5
-; RV32IB-ZALRSC-NEXT:  .LBB6_4: # in Loop: Header=BB6_2 Depth=1
-; RV32IB-ZALRSC-NEXT:    sltu a0, s2, a4
-; RV32IB-ZALRSC-NEXT:  .LBB6_5: # %atomicrmw.start
-; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB6_2 Depth=1
-; RV32IB-ZALRSC-NEXT:    mv a2, a4
-; RV32IB-ZALRSC-NEXT:    mv a3, a5
-; RV32IB-ZALRSC-NEXT:    bnez a0, .LBB6_1
-; RV32IB-ZALRSC-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB6_2 Depth=1
-; RV32IB-ZALRSC-NEXT:    mv a2, s2
-; RV32IB-ZALRSC-NEXT:    mv a3, s0
-; RV32IB-ZALRSC-NEXT:    j .LBB6_1
-; RV32IB-ZALRSC-NEXT:  .LBB6_7: # %atomicrmw.end
-; RV32IB-ZALRSC-NEXT:    mv a0, a4
-; RV32IB-ZALRSC-NEXT:    mv a1, a5
-; RV32IB-ZALRSC-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IB-ZALRSC-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IB-ZALRSC-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IB-ZALRSC-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IB-ZALRSC-NEXT:    addi sp, sp, 32
-; RV32IB-ZALRSC-NEXT:    ret
-;
-; RV32IA-LABEL: atomicrmw_umax_i64_seq_cst:
-; RV32IA:       # %bb.0:
-; RV32IA-NEXT:    addi sp, sp, -32
-; RV32IA-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    mv s0, a2
-; RV32IA-NEXT:    mv s1, a0
-; RV32IA-NEXT:    lw a4, 0(a0)
-; RV32IA-NEXT:    lw a5, 4(a0)
-; RV32IA-NEXT:    mv s2, a1
-; RV32IA-NEXT:    j .LBB6_2
-; RV32IA-NEXT:  .LBB6_1: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB6_2 Depth=1
-; RV32IA-NEXT:    sw a4, 8(sp)
-; RV32IA-NEXT:    sw a5, 12(sp)
-; RV32IA-NEXT:    addi a1, sp, 8
-; RV32IA-NEXT:    li a4, 5
-; RV32IA-NEXT:    li a5, 5
-; RV32IA-NEXT:    mv a0, s1
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
-; RV32IA-NEXT:    lw a4, 8(sp)
-; RV32IA-NEXT:    lw a5, 12(sp)
-; RV32IA-NEXT:    bnez a0, .LBB6_7
-; RV32IA-NEXT:  .LBB6_2: # %atomicrmw.start
-; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    beq a5, s0, .LBB6_4
-; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB6_2 Depth=1
-; RV32IA-NEXT:    sltu a0, s0, a5
-; RV32IA-NEXT:    j .LBB6_5
-; RV32IA-NEXT:  .LBB6_4: # in Loop: Header=BB6_2 Depth=1
-; RV32IA-NEXT:    sltu a0, s2, a4
-; RV32IA-NEXT:  .LBB6_5: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB6_2 Depth=1
-; RV32IA-NEXT:    mv a2, a4
-; RV32IA-NEXT:    mv a3, a5
-; RV32IA-NEXT:    bnez a0, .LBB6_1
-; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB6_2 Depth=1
-; RV32IA-NEXT:    mv a2, s2
-; RV32IA-NEXT:    mv a3, s0
-; RV32IA-NEXT:    j .LBB6_1
-; RV32IA-NEXT:  .LBB6_7: # %atomicrmw.end
-; RV32IA-NEXT:    mv a0, a4
-; RV32IA-NEXT:    mv a1, a5
-; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IA-NEXT:    addi sp, sp, 32
-; RV32IA-NEXT:    ret
-;
-; RV64I-ZALRSC-LABEL: atomicrmw_umax_i64_seq_cst:
-; RV64I-ZALRSC:       # %bb.0:
-; RV64I-ZALRSC-NEXT:  .LBB6_1: # =>This Inner Loop Header: Depth=1
-; RV64I-ZALRSC-NEXT:    lr.d.aqrl a2, (a0)
-; RV64I-ZALRSC-NEXT:    mv a3, a2
-; RV64I-ZALRSC-NEXT:    bgeu a3, a1, .LBB6_3
-; RV64I-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB6_1 Depth=1
-; RV64I-ZALRSC-NEXT:    mv a3, a1
-; RV64I-ZALRSC-NEXT:  .LBB6_3: # in Loop: Header=BB6_1 Depth=1
-; RV64I-ZALRSC-NEXT:    sc.d.rl a3, a3, (a0)
-; RV64I-ZALRSC-NEXT:    bnez a3, .LBB6_1
-; RV64I-ZALRSC-NEXT:  # %bb.4:
-; RV64I-ZALRSC-NEXT:    mv a0, a2
-; RV64I-ZALRSC-NEXT:    ret
+; RV32IB-COMMON-LABEL: atomicrmw_umax_i64_seq_cst:
+; RV32IB-COMMON:       # %bb.0:
+; RV32IB-COMMON-NEXT:    addi sp, sp, -32
+; RV32IB-COMMON-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT:    mv s0, a2
+; RV32IB-COMMON-NEXT:    mv s1, a0
+; RV32IB-COMMON-NEXT:    lw a4, 0(a0)
+; RV32IB-COMMON-NEXT:    lw a5, 4(a0)
+; RV32IB-COMMON-NEXT:    mv s2, a1
+; RV32IB-COMMON-NEXT:    j .LBB6_2
+; RV32IB-COMMON-NEXT:  .LBB6_1: # %atomicrmw.start
+; RV32IB-COMMON-NEXT:    # in Loop: Header=BB6_2 Depth=1
+; RV32IB-COMMON-NEXT:    sw a4, 8(sp)
+; RV32IB-COMMON-NEXT:    sw a5, 12(sp)
+; RV32IB-COMMON-NEXT:    addi a1, sp, 8
+; RV32IB-COMMON-NEXT:    li a4, 5
+; RV32IB-COMMON-NEXT:    li a5, 5
+; RV32IB-COMMON-NEXT:    mv a0, s1
+; RV32IB-COMMON-NEXT:    call __atomic_compare_exchange_8
+; RV32IB-COMMON-NEXT:    lw a4, 8(sp)
+; RV32IB-COMMON-NEXT:    lw a5, 12(sp)
+; RV32IB-COMMON-NEXT:    bnez a0, .LBB6_7
+; RV32IB-COMMON-NEXT:  .LBB6_2: # %atomicrmw.start
+; RV32IB-COMMON-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32IB-COMMON-NEXT:    beq a5, s0, .LBB6_4
+; RV32IB-COMMON-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IB-COMMON-NEXT:    # in Loop: Header=BB6_2 Depth=1
+; RV32IB-COMMON-NEXT:    sltu a0, s0, a5
+; RV32IB-COMMON-NEXT:    j .LBB6_5
+; RV32IB-COMMON-NEXT:  .LBB6_4: # in Loop: Header=BB6_2 Depth=1
+; RV32IB-COMMON-NEXT:    sltu a0, s2, a4
+; RV32IB-COMMON-NEXT:  .LBB6_5: # %atomicrmw.start
+; RV32IB-COMMON-NEXT:    # in Loop: Header=BB6_2 Depth=1
+; RV32IB-COMMON-NEXT:    mv a2, a4
+; RV32IB-COMMON-NEXT:    mv a3, a5
+; RV32IB-COMMON-NEXT:    bnez a0, .LBB6_1
+; RV32IB-COMMON-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IB-COMMON-NEXT:    # in Loop: Header=BB6_2 Depth=1
+; RV32IB-COMMON-NEXT:    mv a2, s2
+; RV32IB-COMMON-NEXT:    mv a3, s0
+; RV32IB-COMMON-NEXT:    j .LBB6_1
+; RV32IB-COMMON-NEXT:  .LBB6_7: # %atomicrmw.end
+; RV32IB-COMMON-NEXT:    mv a0, a4
+; RV32IB-COMMON-NEXT:    mv a1, a5
+; RV32IB-COMMON-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT:    addi sp, sp, 32
+; RV32IB-COMMON-NEXT:    ret
 ;
 ; RV64IB-ZALRSC-LABEL: atomicrmw_umax_i64_seq_cst:
 ; RV64IB-ZALRSC:       # %bb.0:
 ; RV64IB-ZALRSC-NEXT:  .LBB6_1: # =>This Inner Loop Header: Depth=1
 ; RV64IB-ZALRSC-NEXT:    lr.d.aqrl a2, (a0)
-; RV64IB-ZALRSC-NEXT:    maxu a3, a2, a1
+; RV64IB-ZALRSC-NEXT:    mv a3, a2
+; RV64IB-ZALRSC-NEXT:    bgeu a3, a1, .LBB6_3
+; RV64IB-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB6_1 Depth=1
+; RV64IB-ZALRSC-NEXT:    mv a3, a1
+; RV64IB-ZALRSC-NEXT:  .LBB6_3: # in Loop: Header=BB6_1 Depth=1
 ; RV64IB-ZALRSC-NEXT:    sc.d.rl a3, a3, (a0)
 ; RV64IB-ZALRSC-NEXT:    bnez a3, .LBB6_1
-; RV64IB-ZALRSC-NEXT:  # %bb.2:
+; RV64IB-ZALRSC-NEXT:  # %bb.4:
 ; RV64IB-ZALRSC-NEXT:    mv a0, a2
 ; RV64IB-ZALRSC-NEXT:    ret
 ;
-; RV64IA-LABEL: atomicrmw_umax_i64_seq_cst:
-; RV64IA:       # %bb.0:
-; RV64IA-NEXT:    amomaxu.d.aqrl a0, a1, (a0)
-; RV64IA-NEXT:    ret
+; RV64IB-ZALRSC-PERM-LABEL: atomicrmw_umax_i64_seq_cst:
+; RV64IB-ZALRSC-PERM:       # %bb.0:
+; RV64IB-ZALRSC-PERM-NEXT:  .LBB6_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-PERM-NEXT:    lr.d.aqrl a2, (a0)
+; RV64IB-ZALRSC-PERM-NEXT:    maxu a3, a2, a1
+; RV64IB-ZALRSC-PERM-NEXT:    sc.d.rl a3, a3, (a0)
+; RV64IB-ZALRSC-PERM-NEXT:    bnez a3, .LBB6_1
+; RV64IB-ZALRSC-PERM-NEXT:  # %bb.2:
+; RV64IB-ZALRSC-PERM-NEXT:    mv a0, a2
+; RV64IB-ZALRSC-PERM-NEXT:    ret
+;
+; RV64IAB-LABEL: atomicrmw_umax_i64_seq_cst:
+; RV64IAB:       # %bb.0:
+; RV64IAB-NEXT:    amomaxu.d.aqrl a0, a1, (a0)
+; RV64IAB-NEXT:    ret
   %1 = atomicrmw umax ptr %a, i64 %b seq_cst
   ret i64 %1
 }
 
 define i64 @atomicrmw_umin_i64_seq_cst(ptr %a, i64 %b) nounwind {
-; RV32I-ZALRSC-LABEL: atomicrmw_umin_i64_seq_cst:
-; RV32I-ZALRSC:       # %bb.0:
-; RV32I-ZALRSC-NEXT:    addi sp, sp, -32
-; RV32I-ZALRSC-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-ZALRSC-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-ZALRSC-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-ZALRSC-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-ZALRSC-NEXT:    mv s0, a2
-; RV32I-ZALRSC-NEXT:    mv s1, a0
-; RV32I-ZALRSC-NEXT:    lw a4, 0(a0)
-; RV32I-ZALRSC-NEXT:    lw a5, 4(a0)
-; RV32I-ZALRSC-NEXT:    mv s2, a1
-; RV32I-ZALRSC-NEXT:    j .LBB7_2
-; RV32I-ZALRSC-NEXT:  .LBB7_1: # %atomicrmw.start
-; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB7_2 Depth=1
-; RV32I-ZALRSC-NEXT:    sw a4, 8(sp)
-; RV32I-ZALRSC-NEXT:    sw a5, 12(sp)
-; RV32I-ZALRSC-NEXT:    addi a1, sp, 8
-; RV32I-ZALRSC-NEXT:    li a4, 5
-; RV32I-ZALRSC-NEXT:    li a5, 5
-; RV32I-ZALRSC-NEXT:    mv a0, s1
-; RV32I-ZALRSC-NEXT:    call __atomic_compare_exchange_8
-; RV32I-ZALRSC-NEXT:    lw a4, 8(sp)
-; RV32I-ZALRSC-NEXT:    lw a5, 12(sp)
-; RV32I-ZALRSC-NEXT:    bnez a0, .LBB7_7
-; RV32I-ZALRSC-NEXT:  .LBB7_2: # %atomicrmw.start
-; RV32I-ZALRSC-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32I-ZALRSC-NEXT:    beq a5, s0, .LBB7_4
-; RV32I-ZALRSC-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB7_2 Depth=1
-; RV32I-ZALRSC-NEXT:    sltu a0, s0, a5
-; RV32I-ZALRSC-NEXT:    j .LBB7_5
-; RV32I-ZALRSC-NEXT:  .LBB7_4: # in Loop: Header=BB7_2 Depth=1
-; RV32I-ZALRSC-NEXT:    sltu a0, s2, a4
-; RV32I-ZALRSC-NEXT:  .LBB7_5: # %atomicrmw.start
-; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB7_2 Depth=1
-; RV32I-ZALRSC-NEXT:    mv a2, a4
-; RV32I-ZALRSC-NEXT:    mv a3, a5
-; RV32I-ZALRSC-NEXT:    beqz a0, .LBB7_1
-; RV32I-ZALRSC-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32I-ZALRSC-NEXT:    # in Loop: Header=BB7_2 Depth=1
-; RV32I-ZALRSC-NEXT:    mv a2, s2
-; RV32I-ZALRSC-NEXT:    mv a3, s0
-; RV32I-ZALRSC-NEXT:    j .LBB7_1
-; RV32I-ZALRSC-NEXT:  .LBB7_7: # %atomicrmw.end
-; RV32I-ZALRSC-NEXT:    mv a0, a4
-; RV32I-ZALRSC-NEXT:    mv a1, a5
-; RV32I-ZALRSC-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-ZALRSC-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-ZALRSC-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-ZALRSC-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-ZALRSC-NEXT:    addi sp, sp, 32
-; RV32I-ZALRSC-NEXT:    ret
-;
-; RV32IB-ZALRSC-LABEL: atomicrmw_umin_i64_seq_cst:
-; RV32IB-ZALRSC:       # %bb.0:
-; RV32IB-ZALRSC-NEXT:    addi sp, sp, -32
-; RV32IB-ZALRSC-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IB-ZALRSC-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IB-ZALRSC-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IB-ZALRSC-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IB-ZALRSC-NEXT:    mv s0, a2
-; RV32IB-ZALRSC-NEXT:    mv s1, a0
-; RV32IB-ZALRSC-NEXT:    lw a4, 0(a0)
-; RV32IB-ZALRSC-NEXT:    lw a5, 4(a0)
-; RV32IB-ZALRSC-NEXT:    mv s2, a1
-; RV32IB-ZALRSC-NEXT:    j .LBB7_2
-; RV32IB-ZALRSC-NEXT:  .LBB7_1: # %atomicrmw.start
-; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB7_2 Depth=1
-; RV32IB-ZALRSC-NEXT:    sw a4, 8(sp)
-; RV32IB-ZALRSC-NEXT:    sw a5, 12(sp)
-; RV32IB-ZALRSC-NEXT:    addi a1, sp, 8
-; RV32IB-ZALRSC-NEXT:    li a4, 5
-; RV32IB-ZALRSC-NEXT:    li a5, 5
-; RV32IB-ZALRSC-NEXT:    mv a0, s1
-; RV32IB-ZALRSC-NEXT:    call __atomic_compare_exchange_8
-; RV32IB-ZALRSC-NEXT:    lw a4, 8(sp)
-; RV32IB-ZALRSC-NEXT:    lw a5, 12(sp)
-; RV32IB-ZALRSC-NEXT:    bnez a0, .LBB7_7
-; RV32IB-ZALRSC-NEXT:  .LBB7_2: # %atomicrmw.start
-; RV32IB-ZALRSC-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32IB-ZALRSC-NEXT:    beq a5, s0, .LBB7_4
-; RV32IB-ZALRSC-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB7_2 Depth=1
-; RV32IB-ZALRSC-NEXT:    sltu a0, a5, s0
-; RV32IB-ZALRSC-NEXT:    j .LBB7_5
-; RV32IB-ZALRSC-NEXT:  .LBB7_4: # in Loop: Header=BB7_2 Depth=1
-; RV32IB-ZALRSC-NEXT:    sltu a0, a4, s2
-; RV32IB-ZALRSC-NEXT:  .LBB7_5: # %atomicrmw.start
-; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB7_2 Depth=1
-; RV32IB-ZALRSC-NEXT:    mv a2, a4
-; RV32IB-ZALRSC-NEXT:    mv a3, a5
-; RV32IB-ZALRSC-NEXT:    bnez a0, .LBB7_1
-; RV32IB-ZALRSC-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32IB-ZALRSC-NEXT:    # in Loop: Header=BB7_2 Depth=1
-; RV32IB-ZALRSC-NEXT:    mv a2, s2
-; RV32IB-ZALRSC-NEXT:    mv a3, s0
-; RV32IB-ZALRSC-NEXT:    j .LBB7_1
-; RV32IB-ZALRSC-NEXT:  .LBB7_7: # %atomicrmw.end
-; RV32IB-ZALRSC-NEXT:    mv a0, a4
-; RV32IB-ZALRSC-NEXT:    mv a1, a5
-; RV32IB-ZALRSC-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IB-ZALRSC-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IB-ZALRSC-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IB-ZALRSC-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IB-ZALRSC-NEXT:    addi sp, sp, 32
-; RV32IB-ZALRSC-NEXT:    ret
-;
-; RV32IA-LABEL: atomicrmw_umin_i64_seq_cst:
-; RV32IA:       # %bb.0:
-; RV32IA-NEXT:    addi sp, sp, -32
-; RV32IA-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    mv s0, a2
-; RV32IA-NEXT:    mv s1, a0
-; RV32IA-NEXT:    lw a4, 0(a0)
-; RV32IA-NEXT:    lw a5, 4(a0)
-; RV32IA-NEXT:    mv s2, a1
-; RV32IA-NEXT:    j .LBB7_2
-; RV32IA-NEXT:  .LBB7_1: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB7_2 Depth=1
-; RV32IA-NEXT:    sw a4, 8(sp)
-; RV32IA-NEXT:    sw a5, 12(sp)
-; RV32IA-NEXT:    addi a1, sp, 8
-; RV32IA-NEXT:    li a4, 5
-; RV32IA-NEXT:    li a5, 5
-; RV32IA-NEXT:    mv a0, s1
-; RV32IA-NEXT:    call __atomic_compare_exchange_8
-; RV32IA-NEXT:    lw a4, 8(sp)
-; RV32IA-NEXT:    lw a5, 12(sp)
-; RV32IA-NEXT:    bnez a0, .LBB7_7
-; RV32IA-NEXT:  .LBB7_2: # %atomicrmw.start
-; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    beq a5, s0, .LBB7_4
-; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB7_2 Depth=1
-; RV32IA-NEXT:    sltu a0, s0, a5
-; RV32IA-NEXT:    j .LBB7_5
-; RV32IA-NEXT:  .LBB7_4: # in Loop: Header=BB7_2 Depth=1
-; RV32IA-NEXT:    sltu a0, s2, a4
-; RV32IA-NEXT:  .LBB7_5: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB7_2 Depth=1
-; RV32IA-NEXT:    mv a2, a4
-; RV32IA-NEXT:    mv a3, a5
-; RV32IA-NEXT:    beqz a0, .LBB7_1
-; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB7_2 Depth=1
-; RV32IA-NEXT:    mv a2, s2
-; RV32IA-NEXT:    mv a3, s0
-; RV32IA-NEXT:    j .LBB7_1
-; RV32IA-NEXT:  .LBB7_7: # %atomicrmw.end
-; RV32IA-NEXT:    mv a0, a4
-; RV32IA-NEXT:    mv a1, a5
-; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IA-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IA-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IA-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IA-NEXT:    addi sp, sp, 32
-; RV32IA-NEXT:    ret
-;
-; RV64I-ZALRSC-LABEL: atomicrmw_umin_i64_seq_cst:
-; RV64I-ZALRSC:       # %bb.0:
-; RV64I-ZALRSC-NEXT:  .LBB7_1: # =>This Inner Loop Header: Depth=1
-; RV64I-ZALRSC-NEXT:    lr.d.aqrl a2, (a0)
-; RV64I-ZALRSC-NEXT:    mv a3, a2
-; RV64I-ZALRSC-NEXT:    bgeu a1, a3, .LBB7_3
-; RV64I-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB7_1 Depth=1
-; RV64I-ZALRSC-NEXT:    mv a3, a1
-; RV64I-ZALRSC-NEXT:  .LBB7_3: # in Loop: Header=BB7_1 Depth=1
-; RV64I-ZALRSC-NEXT:    sc.d.rl a3, a3, (a0)
-; RV64I-ZALRSC-NEXT:    bnez a3, .LBB7_1
-; RV64I-ZALRSC-NEXT:  # %bb.4:
-; RV64I-ZALRSC-NEXT:    mv a0, a2
-; RV64I-ZALRSC-NEXT:    ret
+; RV32IB-COMMON-LABEL: atomicrmw_umin_i64_seq_cst:
+; RV32IB-COMMON:       # %bb.0:
+; RV32IB-COMMON-NEXT:    addi sp, sp, -32
+; RV32IB-COMMON-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32IB-COMMON-NEXT:    mv s0, a2
+; RV32IB-COMMON-NEXT:    mv s1, a0
+; RV32IB-COMMON-NEXT:    lw a4, 0(a0)
+; RV32IB-COMMON-NEXT:    lw a5, 4(a0)
+; RV32IB-COMMON-NEXT:    mv s2, a1
+; RV32IB-COMMON-NEXT:    j .LBB7_2
+; RV32IB-COMMON-NEXT:  .LBB7_1: # %atomicrmw.start
+; RV32IB-COMMON-NEXT:    # in Loop: Header=BB7_2 Depth=1
+; RV32IB-COMMON-NEXT:    sw a4, 8(sp)
+; RV32IB-COMMON-NEXT:    sw a5, 12(sp)
+; RV32IB-COMMON-NEXT:    addi a1, sp, 8
+; RV32IB-COMMON-NEXT:    li a4, 5
+; RV32IB-COMMON-NEXT:    li a5, 5
+; RV32IB-COMMON-NEXT:    mv a0, s1
+; RV32IB-COMMON-NEXT:    call __atomic_compare_exchange_8
+; RV32IB-COMMON-NEXT:    lw a4, 8(sp)
+; RV32IB-COMMON-NEXT:    lw a5, 12(sp)
+; RV32IB-COMMON-NEXT:    bnez a0, .LBB7_7
+; RV32IB-COMMON-NEXT:  .LBB7_2: # %atomicrmw.start
+; RV32IB-COMMON-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32IB-COMMON-NEXT:    beq a5, s0, .LBB7_4
+; RV32IB-COMMON-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IB-COMMON-NEXT:    # in Loop: Header=BB7_2 Depth=1
+; RV32IB-COMMON-NEXT:    sltu a0, a5, s0
+; RV32IB-COMMON-NEXT:    j .LBB7_5
+; RV32IB-COMMON-NEXT:  .LBB7_4: # in Loop: Header=BB7_2 Depth=1
+; RV32IB-COMMON-NEXT:    sltu a0, a4, s2
+; RV32IB-COMMON-NEXT:  .LBB7_5: # %atomicrmw.start
+; RV32IB-COMMON-NEXT:    # in Loop: Header=BB7_2 Depth=1
+; RV32IB-COMMON-NEXT:    mv a2, a4
+; RV32IB-COMMON-NEXT:    mv a3, a5
+; RV32IB-COMMON-NEXT:    bnez a0, .LBB7_1
+; RV32IB-COMMON-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IB-COMMON-NEXT:    # in Loop: Header=BB7_2 Depth=1
+; RV32IB-COMMON-NEXT:    mv a2, s2
+; RV32IB-COMMON-NEXT:    mv a3, s0
+; RV32IB-COMMON-NEXT:    j .LBB7_1
+; RV32IB-COMMON-NEXT:  .LBB7_7: # %atomicrmw.end
+; RV32IB-COMMON-NEXT:    mv a0, a4
+; RV32IB-COMMON-NEXT:    mv a1, a5
+; RV32IB-COMMON-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32IB-COMMON-NEXT:    addi sp, sp, 32
+; RV32IB-COMMON-NEXT:    ret
 ;
 ; RV64IB-ZALRSC-LABEL: atomicrmw_umin_i64_seq_cst:
 ; RV64IB-ZALRSC:       # %bb.0:
 ; RV64IB-ZALRSC-NEXT:  .LBB7_1: # =>This Inner Loop Header: Depth=1
 ; RV64IB-ZALRSC-NEXT:    lr.d.aqrl a2, (a0)
-; RV64IB-ZALRSC-NEXT:    minu a3, a2, a1
+; RV64IB-ZALRSC-NEXT:    mv a3, a2
+; RV64IB-ZALRSC-NEXT:    bgeu a1, a3, .LBB7_3
+; RV64IB-ZALRSC-NEXT:  # %bb.2: # in Loop: Header=BB7_1 Depth=1
+; RV64IB-ZALRSC-NEXT:    mv a3, a1
+; RV64IB-ZALRSC-NEXT:  .LBB7_3: # in Loop: Header=BB7_1 Depth=1
 ; RV64IB-ZALRSC-NEXT:    sc.d.rl a3, a3, (a0)
 ; RV64IB-ZALRSC-NEXT:    bnez a3, .LBB7_1
-; RV64IB-ZALRSC-NEXT:  # %bb.2:
+; RV64IB-ZALRSC-NEXT:  # %bb.4:
 ; RV64IB-ZALRSC-NEXT:    mv a0, a2
 ; RV64IB-ZALRSC-NEXT:    ret
 ;
-; RV64IA-LABEL: atomicrmw_umin_i64_seq_cst:
-; RV64IA:       # %bb.0:
-; RV64IA-NEXT:    amominu.d.aqrl a0, a1, (a0)
-; RV64IA-NEXT:    ret
+; RV64IB-ZALRSC-PERM-LABEL: atomicrmw_umin_i64_seq_cst:
+; RV64IB-ZALRSC-PERM:       # %bb.0:
+; RV64IB-ZALRSC-PERM-NEXT:  .LBB7_1: # =>This Inner Loop Header: Depth=1
+; RV64IB-ZALRSC-PERM-NEXT:    lr.d.aqrl a2, (a0)
+; RV64IB-ZALRSC-PERM-NEXT:    minu a3, a2, a1
+; RV64IB-ZALRSC-PERM-NEXT:    sc.d.rl a3, a3, (a0)
+; RV64IB-ZALRSC-PERM-NEXT:    bnez a3, .LBB7_1
+; RV64IB-ZALRSC-PERM-NEXT:  # %bb.2:
+; RV64IB-ZALRSC-PERM-NEXT:    mv a0, a2
+; RV64IB-ZALRSC-PERM-NEXT:    ret
+;
+; RV64IAB-LABEL: atomicrmw_umin_i64_seq_cst:
+; RV64IAB:       # %bb.0:
+; RV64IAB-NEXT:    amominu.d.aqrl a0, a1, (a0)
+; RV64IAB-NEXT:    ret
   %1 = atomicrmw umin ptr %a, i64 %b seq_cst
   ret i64 %1
 }
    
    
More information about the llvm-commits
mailing list