[llvm] r334590 - [RISCV] Codegen support for atomic operations on RV32I

Alex Bradbury via llvm-commits llvm-commits at lists.llvm.org
Wed Jun 13 04:58:47 PDT 2018


Author: asb
Date: Wed Jun 13 04:58:46 2018
New Revision: 334590

URL: http://llvm.org/viewvc/llvm-project?rev=334590&view=rev
Log:
[RISCV] Codegen support for atomic operations on RV32I

This patch adds lowering for atomic fences and relies on AtomicExpandPass to
lower atomic loads/stores, atomic rmw, and cmpxchg to __atomic_* libcalls.
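
For readers unfamiliar with the libcall ABI: the size-specialized
__atomic_*_N helpers take the memory ordering as a trailing integer
argument using the C/C++11 __ATOMIC_* encoding (0 = relaxed, 2 = acquire,
3 = release, 4 = acq_rel, 5 = seq_cst), which is where the small constants
loaded into the last argument registers in the tests below come from. A
sketch of the one-byte variants this patch ends up targeting (illustrative
declarations only; the canonical definitions live in compiler-rt/libatomic):

  #include <stdint.h>

  extern "C" {
  // The _2/_4/_8 variants follow the same shape with wider value types.
  // 'order' takes an __ATOMIC_* constant.
  uint8_t __atomic_load_1(const volatile void *ptr, int order);
  void __atomic_store_1(volatile void *ptr, uint8_t val, int order);
  uint8_t __atomic_exchange_1(volatile void *ptr, uint8_t val, int order);
  uint8_t __atomic_fetch_add_1(volatile void *ptr, uint8_t val, int order);
  // 'expected' is passed indirectly and updated on failure; returns true
  // if the exchange happened.
  bool __atomic_compare_exchange_1(volatile void *ptr, void *expected,
                                   uint8_t desired, int success_order,
                                   int failure_order);
  } // extern "C"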

test/CodeGen/RISCV/atomic-* are modelled on the exhaustive
test/CodeGen/PPC/atomics-regression.ll, and will prove more useful once RV32A
codegen support is introduced.

Fence mappings are taken from table A.6 in the current draft of version 2.3 of
the RISC-V Instruction Set Manual, which incorporates the memory model changes
and definitions contributed by the RISC-V Memory Consistency Model task group.

Differential Revision: https://reviews.llvm.org/D47587

Added:
    llvm/trunk/test/CodeGen/RISCV/atomic-cmpxchg.ll
    llvm/trunk/test/CodeGen/RISCV/atomic-fence.ll
    llvm/trunk/test/CodeGen/RISCV/atomic-load-store.ll
    llvm/trunk/test/CodeGen/RISCV/atomic-rmw.ll
Modified:
    llvm/trunk/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/trunk/lib/Target/RISCV/RISCVInstrInfo.td
    llvm/trunk/lib/Target/RISCV/RISCVTargetMachine.cpp

Modified: llvm/trunk/lib/Target/RISCV/RISCVISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/RISCV/RISCVISelLowering.cpp?rev=334590&r1=334589&r2=334590&view=diff
==============================================================================
--- llvm/trunk/lib/Target/RISCV/RISCVISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/RISCV/RISCVISelLowering.cpp Wed Jun 13 04:58:46 2018
@@ -137,6 +137,9 @@ RISCVTargetLowering::RISCVTargetLowering
   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
 
+  // Atomic operations aren't supported in the base RV32I ISA.
+  setMaxAtomicSizeInBitsSupported(0);
+
   setBooleanContents(ZeroOrOneBooleanContent);
 
   // Function alignments (log2).

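The setMaxAtomicSizeInBitsSupported(0) call is what steers AtomicExpandPass
towards libcalls: any atomic operation wider than the reported maximum is
rewritten as a __atomic_* call instead of being selected inline. A minimal
sketch of the decision, paraphrasing the generic TargetLoweringBase query
(not code from this patch):

  #include "llvm/CodeGen/TargetLowering.h"

  // With the maximum set to 0 bits, every atomic load/store, atomicrmw and
  // cmpxchg is wider than supported and so takes the libcall path.
  static bool shouldUseLibcall(const llvm::TargetLoweringBase &TLI,
                               unsigned OpSizeInBits) {
    return OpSizeInBits > TLI.getMaxAtomicSizeInBitsSupported();
  }
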
Modified: llvm/trunk/lib/Target/RISCV/RISCVInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/RISCV/RISCVInstrInfo.td?rev=334590&r1=334589&r2=334590&view=diff
==============================================================================
--- llvm/trunk/lib/Target/RISCV/RISCVInstrInfo.td (original)
+++ llvm/trunk/lib/Target/RISCV/RISCVInstrInfo.td Wed Jun 13 04:58:46 2018
@@ -743,6 +743,20 @@ defm : StPat<truncstorei8, SB, GPR>;
 defm : StPat<truncstorei16, SH, GPR>;
 defm : StPat<store, SW, GPR>;
 
+/// Fences
+
+// Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
+// Manual: Volume I.
+
+// fence acquire -> fence r, rw
+def : Pat<(atomic_fence (i32 4), (imm)), (FENCE 0b10, 0b11)>;
+// fence release -> fence rw, w
+def : Pat<(atomic_fence (i32 5), (imm)), (FENCE 0b11, 0b1)>;
+// fence acq_rel -> fence.tso
+def : Pat<(atomic_fence (i32 6), (imm)), (FENCE_TSO)>;
+// fence seq_cst -> fence rw, rw
+def : Pat<(atomic_fence (i32 7), (imm)), (FENCE 0b11, 0b11)>;
+
 /// Other pseudo-instructions
 
 // Pessimistically assume the stack pointer will be clobbered

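The immediates matched by the patterns above are LLVM's internal
AtomicOrdering encoding as it appears on the ISD::ATOMIC_FENCE node, not
the __ATOMIC_* libcall constants used elsewhere in this patch. For
reference, the encoding from llvm/Support/AtomicOrdering.h:

  // llvm/Support/AtomicOrdering.h (reproduced for reference)
  enum class AtomicOrdering {
    NotAtomic = 0,
    Unordered = 1,
    Monotonic = 2, // equivalent to C++ memory_order_relaxed
    // 3 is reserved for "consume"
    Acquire = 4,   // hence (i32 4) -> fence r, rw above
    Release = 5,
    AcquireRelease = 6,
    SequentiallyConsistent = 7
  };
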
Modified: llvm/trunk/lib/Target/RISCV/RISCVTargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/RISCV/RISCVTargetMachine.cpp?rev=334590&r1=334589&r2=334590&view=diff
==============================================================================
--- llvm/trunk/lib/Target/RISCV/RISCVTargetMachine.cpp (original)
+++ llvm/trunk/lib/Target/RISCV/RISCVTargetMachine.cpp Wed Jun 13 04:58:46 2018
@@ -75,6 +75,7 @@ public:
     return getTM<RISCVTargetMachine>();
   }
 
+  void addIRPasses() override;
   bool addInstSelector() override;
   void addPreEmitPass() override;
 };
@@ -84,6 +85,11 @@ TargetPassConfig *RISCVTargetMachine::cr
   return new RISCVPassConfig(*this, PM);
 }
 
+void RISCVPassConfig::addIRPasses() {
+  addPass(createAtomicExpandPass());
+  TargetPassConfig::addIRPasses();
+}
+
 bool RISCVPassConfig::addInstSelector() {
   addPass(createRISCVISelDag(getRISCVTargetMachine()));
 

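Running createAtomicExpandPass() at the head of addIRPasses() ensures all
atomic operations have already been rewritten into fences and __atomic_*
libcalls before instruction selection, matching what other targets that
rely on AtomicExpandPass do. One way to confirm the pass is scheduled
(the exact pass description printed may differ between LLVM versions):

  $ llc -mtriple=riscv32 -debug-pass=Structure foo.ll 2>&1 | grep -i atomic
        Expand Atomic instructions
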
Added: llvm/trunk/test/CodeGen/RISCV/atomic-cmpxchg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/atomic-cmpxchg.ll?rev=334590&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/atomic-cmpxchg.ll (added)
+++ llvm/trunk/test/CodeGen/RISCV/atomic-cmpxchg.ll Wed Jun 13 04:58:46 2018
@@ -0,0 +1,720 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32I %s
+
+define void @cmpxchg_i8_monotonic_monotonic(i8* %ptr, i8 %cmp, i8 %val) {
+; RV32I-LABEL: cmpxchg_i8_monotonic_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sb a1, 11(sp)
+; RV32I-NEXT:    addi a1, sp, 11
+; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val monotonic monotonic
+  ret void
+}
+
+define void @cmpxchg_i8_acquire_monotonic(i8* %ptr, i8 %cmp, i8 %val) {
+; RV32I-LABEL: cmpxchg_i8_acquire_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sb a1, 11(sp)
+; RV32I-NEXT:    addi a1, sp, 11
+; RV32I-NEXT:    addi a3, zero, 2
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acquire monotonic
+  ret void
+}
+
+define void @cmpxchg_i8_acquire_acquire(i8* %ptr, i8 %cmp, i8 %val) {
+; RV32I-LABEL: cmpxchg_i8_acquire_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sb a1, 11(sp)
+; RV32I-NEXT:    addi a1, sp, 11
+; RV32I-NEXT:    addi a3, zero, 2
+; RV32I-NEXT:    mv a4, a3
+; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acquire acquire
+  ret void
+}
+
+define void @cmpxchg_i8_release_monotonic(i8* %ptr, i8 %cmp, i8 %val) {
+; RV32I-LABEL: cmpxchg_i8_release_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sb a1, 11(sp)
+; RV32I-NEXT:    addi a1, sp, 11
+; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release monotonic
+  ret void
+}
+
+define void @cmpxchg_i8_release_acquire(i8* %ptr, i8 %cmp, i8 %val) {
+; RV32I-LABEL: cmpxchg_i8_release_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sb a1, 11(sp)
+; RV32I-NEXT:    addi a1, sp, 11
+; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release acquire
+  ret void
+}
+
+define void @cmpxchg_i8_acq_rel_monotonic(i8* %ptr, i8 %cmp, i8 %val) {
+; RV32I-LABEL: cmpxchg_i8_acq_rel_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sb a1, 11(sp)
+; RV32I-NEXT:    addi a1, sp, 11
+; RV32I-NEXT:    addi a3, zero, 4
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acq_rel monotonic
+  ret void
+}
+
+define void @cmpxchg_i8_acq_rel_acquire(i8* %ptr, i8 %cmp, i8 %val) {
+; RV32I-LABEL: cmpxchg_i8_acq_rel_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sb a1, 11(sp)
+; RV32I-NEXT:    addi a1, sp, 11
+; RV32I-NEXT:    addi a3, zero, 4
+; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acq_rel acquire
+  ret void
+}
+
+define void @cmpxchg_i8_seq_cst_monotonic(i8* %ptr, i8 %cmp, i8 %val) {
+; RV32I-LABEL: cmpxchg_i8_seq_cst_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sb a1, 11(sp)
+; RV32I-NEXT:    addi a1, sp, 11
+; RV32I-NEXT:    addi a3, zero, 5
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst monotonic
+  ret void
+}
+
+define void @cmpxchg_i8_seq_cst_acquire(i8* %ptr, i8 %cmp, i8 %val) {
+; RV32I-LABEL: cmpxchg_i8_seq_cst_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sb a1, 11(sp)
+; RV32I-NEXT:    addi a1, sp, 11
+; RV32I-NEXT:    addi a3, zero, 5
+; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst acquire
+  ret void
+}
+
+define void @cmpxchg_i8_seq_cst_seq_cst(i8* %ptr, i8 %cmp, i8 %val) {
+; RV32I-LABEL: cmpxchg_i8_seq_cst_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sb a1, 11(sp)
+; RV32I-NEXT:    addi a1, sp, 11
+; RV32I-NEXT:    addi a3, zero, 5
+; RV32I-NEXT:    mv a4, a3
+; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst seq_cst
+  ret void
+}
+
+define void @cmpxchg_i16_monotonic_monotonic(i16* %ptr, i16 %cmp, i16 %val) {
+; RV32I-LABEL: cmpxchg_i16_monotonic_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sh a1, 10(sp)
+; RV32I-NEXT:    addi a1, sp, 10
+; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val monotonic monotonic
+  ret void
+}
+
+define void @cmpxchg_i16_acquire_monotonic(i16* %ptr, i16 %cmp, i16 %val) {
+; RV32I-LABEL: cmpxchg_i16_acquire_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sh a1, 10(sp)
+; RV32I-NEXT:    addi a1, sp, 10
+; RV32I-NEXT:    addi a3, zero, 2
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acquire monotonic
+  ret void
+}
+
+define void @cmpxchg_i16_acquire_acquire(i16* %ptr, i16 %cmp, i16 %val) {
+; RV32I-LABEL: cmpxchg_i16_acquire_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sh a1, 10(sp)
+; RV32I-NEXT:    addi a1, sp, 10
+; RV32I-NEXT:    addi a3, zero, 2
+; RV32I-NEXT:    mv a4, a3
+; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acquire acquire
+  ret void
+}
+
+define void @cmpxchg_i16_release_monotonic(i16* %ptr, i16 %cmp, i16 %val) {
+; RV32I-LABEL: cmpxchg_i16_release_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sh a1, 10(sp)
+; RV32I-NEXT:    addi a1, sp, 10
+; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release monotonic
+  ret void
+}
+
+define void @cmpxchg_i16_release_acquire(i16* %ptr, i16 %cmp, i16 %val) {
+; RV32I-LABEL: cmpxchg_i16_release_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sh a1, 10(sp)
+; RV32I-NEXT:    addi a1, sp, 10
+; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release acquire
+  ret void
+}
+
+define void @cmpxchg_i16_acq_rel_monotonic(i16* %ptr, i16 %cmp, i16 %val) {
+; RV32I-LABEL: cmpxchg_i16_acq_rel_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sh a1, 10(sp)
+; RV32I-NEXT:    addi a1, sp, 10
+; RV32I-NEXT:    addi a3, zero, 4
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acq_rel monotonic
+  ret void
+}
+
+define void @cmpxchg_i16_acq_rel_acquire(i16* %ptr, i16 %cmp, i16 %val) {
+; RV32I-LABEL: cmpxchg_i16_acq_rel_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sh a1, 10(sp)
+; RV32I-NEXT:    addi a1, sp, 10
+; RV32I-NEXT:    addi a3, zero, 4
+; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acq_rel acquire
+  ret void
+}
+
+define void @cmpxchg_i16_seq_cst_monotonic(i16* %ptr, i16 %cmp, i16 %val) {
+; RV32I-LABEL: cmpxchg_i16_seq_cst_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sh a1, 10(sp)
+; RV32I-NEXT:    addi a1, sp, 10
+; RV32I-NEXT:    addi a3, zero, 5
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst monotonic
+  ret void
+}
+
+define void @cmpxchg_i16_seq_cst_acquire(i16* %ptr, i16 %cmp, i16 %val) {
+; RV32I-LABEL: cmpxchg_i16_seq_cst_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sh a1, 10(sp)
+; RV32I-NEXT:    addi a1, sp, 10
+; RV32I-NEXT:    addi a3, zero, 5
+; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst acquire
+  ret void
+}
+
+define void @cmpxchg_i16_seq_cst_seq_cst(i16* %ptr, i16 %cmp, i16 %val) {
+; RV32I-LABEL: cmpxchg_i16_seq_cst_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sh a1, 10(sp)
+; RV32I-NEXT:    addi a1, sp, 10
+; RV32I-NEXT:    addi a3, zero, 5
+; RV32I-NEXT:    mv a4, a3
+; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst seq_cst
+  ret void
+}
+
+define void @cmpxchg_i32_monotonic_monotonic(i32* %ptr, i32 %cmp, i32 %val) {
+; RV32I-LABEL: cmpxchg_i32_monotonic_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw a1, 8(sp)
+; RV32I-NEXT:    addi a1, sp, 8
+; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val monotonic monotonic
+  ret void
+}
+
+define void @cmpxchg_i32_acquire_monotonic(i32* %ptr, i32 %cmp, i32 %val) {
+; RV32I-LABEL: cmpxchg_i32_acquire_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw a1, 8(sp)
+; RV32I-NEXT:    addi a1, sp, 8
+; RV32I-NEXT:    addi a3, zero, 2
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acquire monotonic
+  ret void
+}
+
+define void @cmpxchg_i32_acquire_acquire(i32* %ptr, i32 %cmp, i32 %val) {
+; RV32I-LABEL: cmpxchg_i32_acquire_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw a1, 8(sp)
+; RV32I-NEXT:    addi a1, sp, 8
+; RV32I-NEXT:    addi a3, zero, 2
+; RV32I-NEXT:    mv a4, a3
+; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acquire acquire
+  ret void
+}
+
+define void @cmpxchg_i32_release_monotonic(i32* %ptr, i32 %cmp, i32 %val) {
+; RV32I-LABEL: cmpxchg_i32_release_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw a1, 8(sp)
+; RV32I-NEXT:    addi a1, sp, 8
+; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val release monotonic
+  ret void
+}
+
+define void @cmpxchg_i32_release_acquire(i32* %ptr, i32 %cmp, i32 %val) {
+; RV32I-LABEL: cmpxchg_i32_release_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw a1, 8(sp)
+; RV32I-NEXT:    addi a1, sp, 8
+; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val release acquire
+  ret void
+}
+
+define void @cmpxchg_i32_acq_rel_monotonic(i32* %ptr, i32 %cmp, i32 %val) {
+; RV32I-LABEL: cmpxchg_i32_acq_rel_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw a1, 8(sp)
+; RV32I-NEXT:    addi a1, sp, 8
+; RV32I-NEXT:    addi a3, zero, 4
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acq_rel monotonic
+  ret void
+}
+
+define void @cmpxchg_i32_acq_rel_acquire(i32* %ptr, i32 %cmp, i32 %val) {
+; RV32I-LABEL: cmpxchg_i32_acq_rel_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw a1, 8(sp)
+; RV32I-NEXT:    addi a1, sp, 8
+; RV32I-NEXT:    addi a3, zero, 4
+; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acq_rel acquire
+  ret void
+}
+
+define void @cmpxchg_i32_seq_cst_monotonic(i32* %ptr, i32 %cmp, i32 %val) {
+; RV32I-LABEL: cmpxchg_i32_seq_cst_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw a1, 8(sp)
+; RV32I-NEXT:    addi a1, sp, 8
+; RV32I-NEXT:    addi a3, zero, 5
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst monotonic
+  ret void
+}
+
+define void @cmpxchg_i32_seq_cst_acquire(i32* %ptr, i32 %cmp, i32 %val) {
+; RV32I-LABEL: cmpxchg_i32_seq_cst_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw a1, 8(sp)
+; RV32I-NEXT:    addi a1, sp, 8
+; RV32I-NEXT:    addi a3, zero, 5
+; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst acquire
+  ret void
+}
+
+define void @cmpxchg_i32_seq_cst_seq_cst(i32* %ptr, i32 %cmp, i32 %val) {
+; RV32I-LABEL: cmpxchg_i32_seq_cst_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw a1, 8(sp)
+; RV32I-NEXT:    addi a1, sp, 8
+; RV32I-NEXT:    addi a3, zero, 5
+; RV32I-NEXT:    mv a4, a3
+; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst seq_cst
+  ret void
+}
+
+define void @cmpxchg_i64_monotonic_monotonic(i64* %ptr, i64 %cmp, i64 %val) {
+; RV32I-LABEL: cmpxchg_i64_monotonic_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw a2, 4(sp)
+; RV32I-NEXT:    sw a1, 0(sp)
+; RV32I-NEXT:    mv a1, sp
+; RV32I-NEXT:    mv a2, a3
+; RV32I-NEXT:    mv a3, a4
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val monotonic monotonic
+  ret void
+}
+
+define void @cmpxchg_i64_acquire_monotonic(i64* %ptr, i64 %cmp, i64 %val) {
+; RV32I-LABEL: cmpxchg_i64_acquire_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw a2, 4(sp)
+; RV32I-NEXT:    sw a1, 0(sp)
+; RV32I-NEXT:    mv a1, sp
+; RV32I-NEXT:    addi a5, zero, 2
+; RV32I-NEXT:    mv a2, a3
+; RV32I-NEXT:    mv a3, a4
+; RV32I-NEXT:    mv a4, a5
+; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acquire monotonic
+  ret void
+}
+
+define void @cmpxchg_i64_acquire_acquire(i64* %ptr, i64 %cmp, i64 %val) {
+; RV32I-LABEL: cmpxchg_i64_acquire_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw a2, 4(sp)
+; RV32I-NEXT:    sw a1, 0(sp)
+; RV32I-NEXT:    mv a1, sp
+; RV32I-NEXT:    addi a5, zero, 2
+; RV32I-NEXT:    mv a2, a3
+; RV32I-NEXT:    mv a3, a4
+; RV32I-NEXT:    mv a4, a5
+; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acquire acquire
+  ret void
+}
+
+define void @cmpxchg_i64_release_monotonic(i64* %ptr, i64 %cmp, i64 %val) {
+; RV32I-LABEL: cmpxchg_i64_release_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw a2, 4(sp)
+; RV32I-NEXT:    sw a1, 0(sp)
+; RV32I-NEXT:    mv a1, sp
+; RV32I-NEXT:    addi a5, zero, 3
+; RV32I-NEXT:    mv a2, a3
+; RV32I-NEXT:    mv a3, a4
+; RV32I-NEXT:    mv a4, a5
+; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val release monotonic
+  ret void
+}
+
+define void @cmpxchg_i64_release_acquire(i64* %ptr, i64 %cmp, i64 %val) {
+; RV32I-LABEL: cmpxchg_i64_release_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw a2, 4(sp)
+; RV32I-NEXT:    sw a1, 0(sp)
+; RV32I-NEXT:    mv a1, sp
+; RV32I-NEXT:    addi a6, zero, 3
+; RV32I-NEXT:    addi a5, zero, 2
+; RV32I-NEXT:    mv a2, a3
+; RV32I-NEXT:    mv a3, a4
+; RV32I-NEXT:    mv a4, a6
+; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val release acquire
+  ret void
+}
+
+define void @cmpxchg_i64_acq_rel_monotonic(i64* %ptr, i64 %cmp, i64 %val) {
+; RV32I-LABEL: cmpxchg_i64_acq_rel_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw a2, 4(sp)
+; RV32I-NEXT:    sw a1, 0(sp)
+; RV32I-NEXT:    mv a1, sp
+; RV32I-NEXT:    addi a5, zero, 4
+; RV32I-NEXT:    mv a2, a3
+; RV32I-NEXT:    mv a3, a4
+; RV32I-NEXT:    mv a4, a5
+; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acq_rel monotonic
+  ret void
+}
+
+define void @cmpxchg_i64_acq_rel_acquire(i64* %ptr, i64 %cmp, i64 %val) {
+; RV32I-LABEL: cmpxchg_i64_acq_rel_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw a2, 4(sp)
+; RV32I-NEXT:    sw a1, 0(sp)
+; RV32I-NEXT:    mv a1, sp
+; RV32I-NEXT:    addi a6, zero, 4
+; RV32I-NEXT:    addi a5, zero, 2
+; RV32I-NEXT:    mv a2, a3
+; RV32I-NEXT:    mv a3, a4
+; RV32I-NEXT:    mv a4, a6
+; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acq_rel acquire
+  ret void
+}
+
+define void @cmpxchg_i64_seq_cst_monotonic(i64* %ptr, i64 %cmp, i64 %val) {
+; RV32I-LABEL: cmpxchg_i64_seq_cst_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw a2, 4(sp)
+; RV32I-NEXT:    sw a1, 0(sp)
+; RV32I-NEXT:    mv a1, sp
+; RV32I-NEXT:    addi a5, zero, 5
+; RV32I-NEXT:    mv a2, a3
+; RV32I-NEXT:    mv a3, a4
+; RV32I-NEXT:    mv a4, a5
+; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst monotonic
+  ret void
+}
+
+define void @cmpxchg_i64_seq_cst_acquire(i64* %ptr, i64 %cmp, i64 %val) {
+; RV32I-LABEL: cmpxchg_i64_seq_cst_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw a2, 4(sp)
+; RV32I-NEXT:    sw a1, 0(sp)
+; RV32I-NEXT:    mv a1, sp
+; RV32I-NEXT:    addi a6, zero, 5
+; RV32I-NEXT:    addi a5, zero, 2
+; RV32I-NEXT:    mv a2, a3
+; RV32I-NEXT:    mv a3, a4
+; RV32I-NEXT:    mv a4, a6
+; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst acquire
+  ret void
+}
+
+define void @cmpxchg_i64_seq_cst_seq_cst(i64* %ptr, i64 %cmp, i64 %val) {
+; RV32I-LABEL: cmpxchg_i64_seq_cst_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw a2, 4(sp)
+; RV32I-NEXT:    sw a1, 0(sp)
+; RV32I-NEXT:    mv a1, sp
+; RV32I-NEXT:    addi a5, zero, 5
+; RV32I-NEXT:    mv a2, a3
+; RV32I-NEXT:    mv a3, a4
+; RV32I-NEXT:    mv a4, a5
+; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst seq_cst
+  ret void
+}

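A note on the pattern repeated throughout the cmpxchg tests above: the
libcall receives the expected value by pointer, so the generated code
spills %cmp to the stack (the sb/sh/sw of a1) and passes its address in
a1, with the success and failure orderings in the last two argument
registers. A hypothetical source-level equivalent of one test, written
with the corresponding Clang/GCC builtin:

  #include <stdint.h>

  // Roughly what cmpxchg_i8_seq_cst_seq_cst compiles from; 5 is
  // __ATOMIC_SEQ_CST for both the success and failure orderings.
  void cmpxchg_i8_seq_cst_seq_cst(uint8_t *ptr, uint8_t cmp, uint8_t val) {
    __atomic_compare_exchange_n(ptr, &cmp, val, /*weak=*/false,
                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  }
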
Added: llvm/trunk/test/CodeGen/RISCV/atomic-fence.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/atomic-fence.ll?rev=334590&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/atomic-fence.ll (added)
+++ llvm/trunk/test/CodeGen/RISCV/atomic-fence.ll Wed Jun 13 04:58:46 2018
@@ -0,0 +1,41 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32I %s
+
+define void @fence_acquire() nounwind {
+; RV32I-LABEL: fence_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    fence r, rw
+; RV32I-NEXT:    ret
+  fence acquire
+  ret void
+}
+
+define void @fence_release() nounwind {
+; RV32I-LABEL: fence_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    fence rw, w
+; RV32I-NEXT:    ret
+  fence release
+  ret void
+}
+
+define void @fence_acq_rel() nounwind {
+; RV32I-LABEL: fence_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    fence.tso
+; RV32I-NEXT:    ret
+  fence acq_rel
+  ret void
+}
+
+define void @fence_seq_cst() nounwind {
+; RV32I-LABEL: fence_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    fence rw, rw
+; RV32I-NEXT:    ret
+  fence seq_cst
+  ret void
+}

Added: llvm/trunk/test/CodeGen/RISCV/atomic-load-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/atomic-load-store.ll?rev=334590&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/atomic-load-store.ll (added)
+++ llvm/trunk/test/CodeGen/RISCV/atomic-load-store.ll Wed Jun 13 04:58:46 2018
@@ -0,0 +1,451 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32I %s
+
+define i8 @atomic_load_i8_unordered(i8 *%a) nounwind {
+; RV32I-LABEL: atomic_load_i8_unordered:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    call __atomic_load_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = load atomic i8, i8* %a unordered, align 1
+  ret i8 %1
+}
+
+define i8 @atomic_load_i8_monotonic(i8 *%a) nounwind {
+; RV32I-LABEL: atomic_load_i8_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    call __atomic_load_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = load atomic i8, i8* %a monotonic, align 1
+  ret i8 %1
+}
+
+define i8 @atomic_load_i8_acquire(i8 *%a) nounwind {
+; RV32I-LABEL: atomic_load_i8_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a1, zero, 2
+; RV32I-NEXT:    call __atomic_load_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = load atomic i8, i8* %a acquire, align 1
+  ret i8 %1
+}
+
+define i8 @atomic_load_i8_seq_cst(i8 *%a) nounwind {
+; RV32I-LABEL: atomic_load_i8_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a1, zero, 5
+; RV32I-NEXT:    call __atomic_load_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = load atomic i8, i8* %a seq_cst, align 1
+  ret i8 %1
+}
+
+define i16 @atomic_load_i16_unordered(i16 *%a) nounwind {
+; RV32I-LABEL: atomic_load_i16_unordered:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    call __atomic_load_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = load atomic i16, i16* %a unordered, align 2
+  ret i16 %1
+}
+
+define i16 @atomic_load_i16_monotonic(i16 *%a) nounwind {
+; RV32I-LABEL: atomic_load_i16_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    call __atomic_load_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = load atomic i16, i16* %a monotonic, align 2
+  ret i16 %1
+}
+
+define i16 @atomic_load_i16_acquire(i16 *%a) nounwind {
+; RV32I-LABEL: atomic_load_i16_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a1, zero, 2
+; RV32I-NEXT:    call __atomic_load_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = load atomic i16, i16* %a acquire, align 2
+  ret i16 %1
+}
+
+define i16 @atomic_load_i16_seq_cst(i16 *%a) nounwind {
+; RV32I-LABEL: atomic_load_i16_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a1, zero, 5
+; RV32I-NEXT:    call __atomic_load_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = load atomic i16, i16* %a seq_cst, align 2
+  ret i16 %1
+}
+
+define i32 @atomic_load_i32_unordered(i32 *%a) nounwind {
+; RV32I-LABEL: atomic_load_i32_unordered:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    call __atomic_load_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = load atomic i32, i32* %a unordered, align 4
+  ret i32 %1
+}
+
+define i32 @atomic_load_i32_monotonic(i32 *%a) nounwind {
+; RV32I-LABEL: atomic_load_i32_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    call __atomic_load_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = load atomic i32, i32* %a monotonic, align 4
+  ret i32 %1
+}
+
+define i32 @atomic_load_i32_acquire(i32 *%a) nounwind {
+; RV32I-LABEL: atomic_load_i32_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a1, zero, 2
+; RV32I-NEXT:    call __atomic_load_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = load atomic i32, i32* %a acquire, align 4
+  ret i32 %1
+}
+
+define i32 @atomic_load_i32_seq_cst(i32 *%a) nounwind {
+; RV32I-LABEL: atomic_load_i32_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a1, zero, 5
+; RV32I-NEXT:    call __atomic_load_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = load atomic i32, i32* %a seq_cst, align 4
+  ret i32 %1
+}
+
+define i64 @atomic_load_i64_unordered(i64 *%a) nounwind {
+; RV32I-LABEL: atomic_load_i64_unordered:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    call __atomic_load_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = load atomic i64, i64* %a unordered, align 8
+  ret i64 %1
+}
+
+define i64 @atomic_load_i64_monotonic(i64 *%a) nounwind {
+; RV32I-LABEL: atomic_load_i64_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    call __atomic_load_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = load atomic i64, i64* %a monotonic, align 8
+  ret i64 %1
+}
+
+define i64 @atomic_load_i64_acquire(i64 *%a) nounwind {
+; RV32I-LABEL: atomic_load_i64_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a1, zero, 2
+; RV32I-NEXT:    call __atomic_load_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = load atomic i64, i64* %a acquire, align 8
+  ret i64 %1
+}
+
+define i64 @atomic_load_i64_seq_cst(i64 *%a) nounwind {
+; RV32I-LABEL: atomic_load_i64_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a1, zero, 5
+; RV32I-NEXT:    call __atomic_load_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = load atomic i64, i64* %a seq_cst, align 8
+  ret i64 %1
+}
+
+define void @atomic_store_i8_unordered(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomic_store_i8_unordered:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    call __atomic_store_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  store atomic i8 %b, i8* %a unordered, align 1
+  ret void
+}
+
+define void @atomic_store_i8_monotonic(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomic_store_i8_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    call __atomic_store_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  store atomic i8 %b, i8* %a monotonic, align 1
+  ret void
+}
+
+define void @atomic_store_i8_release(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomic_store_i8_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    call __atomic_store_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  store atomic i8 %b, i8* %a release, align 1
+  ret void
+}
+
+define void @atomic_store_i8_seq_cst(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomic_store_i8_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    call __atomic_store_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  store atomic i8 %b, i8* %a seq_cst, align 1
+  ret void
+}
+
+define void @atomic_store_i16_unordered(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomic_store_i16_unordered:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    call __atomic_store_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  store atomic i16 %b, i16* %a unordered, align 2
+  ret void
+}
+
+define void @atomic_store_i16_monotonic(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomic_store_i16_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    call __atomic_store_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  store atomic i16 %b, i16* %a monotonic, align 2
+  ret void
+}
+
+define void @atomic_store_i16_release(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomic_store_i16_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    call __atomic_store_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  store atomic i16 %b, i16* %a release, align 2
+  ret void
+}
+
+define void @atomic_store_i16_seq_cst(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomic_store_i16_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    call __atomic_store_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  store atomic i16 %b, i16* %a seq_cst, align 2
+  ret void
+}
+
+define void @atomic_store_i32_unordered(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomic_store_i32_unordered:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    call __atomic_store_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  store atomic i32 %b, i32* %a unordered, align 4
+  ret void
+}
+
+define void @atomic_store_i32_monotonic(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomic_store_i32_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    call __atomic_store_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  store atomic i32 %b, i32* %a monotonic, align 4
+  ret void
+}
+
+define void @atomic_store_i32_release(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomic_store_i32_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    call __atomic_store_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  store atomic i32 %b, i32* %a release, align 4
+  ret void
+}
+
+define void @atomic_store_i32_seq_cst(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomic_store_i32_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    call __atomic_store_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  store atomic i32 %b, i32* %a seq_cst, align 4
+  ret void
+}
+
+define void @atomic_store_i64_unordered(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomic_store_i64_unordered:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    call __atomic_store_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  store atomic i64 %b, i64* %a unordered, align 8
+  ret void
+}
+
+define void @atomic_store_i64_monotonic(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomic_store_i64_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    call __atomic_store_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  store atomic i64 %b, i64* %a monotonic, align 8
+  ret void
+}
+
+define void @atomic_store_i64_release(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomic_store_i64_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    call __atomic_store_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  store atomic i64 %b, i64* %a release, align 8
+  ret void
+}
+
+define void @atomic_store_i64_seq_cst(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomic_store_i64_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a3, zero, 5
+; RV32I-NEXT:    call __atomic_store_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  store atomic i64 %b, i64* %a seq_cst, align 8
+  ret void
+}

Added: llvm/trunk/test/CodeGen/RISCV/atomic-rmw.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/atomic-rmw.ll?rev=334590&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/atomic-rmw.ll (added)
+++ llvm/trunk/test/CodeGen/RISCV/atomic-rmw.ll Wed Jun 13 04:58:46 2018
@@ -0,0 +1,6133 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32I %s
+
+define i8 @atomicrmw_xchg_i8_monotonic(i8* %a, i8 %b) {
+; RV32I-LABEL: atomicrmw_xchg_i8_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    call __atomic_exchange_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xchg i8* %a, i8 %b monotonic
+  ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_i8_acquire(i8* %a, i8 %b) {
+; RV32I-LABEL: atomicrmw_xchg_i8_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    call __atomic_exchange_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xchg i8* %a, i8 %b acquire
+  ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_i8_release(i8* %a, i8 %b) {
+; RV32I-LABEL: atomicrmw_xchg_i8_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    call __atomic_exchange_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xchg i8* %a, i8 %b release
+  ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_i8_acq_rel(i8* %a, i8 %b) {
+; RV32I-LABEL: atomicrmw_xchg_i8_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    call __atomic_exchange_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xchg i8* %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_i8_seq_cst(i8* %a, i8 %b) {
+; RV32I-LABEL: atomicrmw_xchg_i8_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    call __atomic_exchange_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xchg i8* %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i8 @atomicrmw_add_i8_monotonic(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_add_i8_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    call __atomic_fetch_add_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw add i8* %a, i8 %b monotonic
+  ret i8 %1
+}
+
+define i8 @atomicrmw_add_i8_acquire(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_add_i8_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    call __atomic_fetch_add_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw add i8* %a, i8 %b acquire
+  ret i8 %1
+}
+
+define i8 @atomicrmw_add_i8_release(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_add_i8_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    call __atomic_fetch_add_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw add i8* %a, i8 %b release
+  ret i8 %1
+}
+
+define i8 @atomicrmw_add_i8_acq_rel(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_add_i8_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    call __atomic_fetch_add_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw add i8* %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+define i8 @atomicrmw_add_i8_seq_cst(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_add_i8_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    call __atomic_fetch_add_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw add i8* %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i8 @atomicrmw_sub_i8_monotonic(i8* %a, i8 %b) {
+; RV32I-LABEL: atomicrmw_sub_i8_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    call __atomic_fetch_sub_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw sub i8* %a, i8 %b monotonic
+  ret i8 %1
+}
+
+define i8 @atomicrmw_sub_i8_acquire(i8* %a, i8 %b) {
+; RV32I-LABEL: atomicrmw_sub_i8_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    call __atomic_fetch_sub_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw sub i8* %a, i8 %b acquire
+  ret i8 %1
+}
+
+define i8 @atomicrmw_sub_i8_release(i8* %a, i8 %b) {
+; RV32I-LABEL: atomicrmw_sub_i8_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    call __atomic_fetch_sub_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw sub i8* %a, i8 %b release
+  ret i8 %1
+}
+
+define i8 @atomicrmw_sub_i8_acq_rel(i8* %a, i8 %b) {
+; RV32I-LABEL: atomicrmw_sub_i8_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    call __atomic_fetch_sub_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw sub i8* %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+define i8 @atomicrmw_sub_i8_seq_cst(i8* %a, i8 %b) {
+; RV32I-LABEL: atomicrmw_sub_i8_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    call __atomic_fetch_sub_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw sub i8* %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i8 @atomicrmw_and_i8_monotonic(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_and_i8_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    call __atomic_fetch_and_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw and i8* %a, i8 %b monotonic
+  ret i8 %1
+}
+
+define i8 @atomicrmw_and_i8_acquire(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_and_i8_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    call __atomic_fetch_and_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw and i8* %a, i8 %b acquire
+  ret i8 %1
+}
+
+define i8 @atomicrmw_and_i8_release(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_and_i8_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    call __atomic_fetch_and_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw and i8* %a, i8 %b release
+  ret i8 %1
+}
+
+define i8 @atomicrmw_and_i8_acq_rel(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_and_i8_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    call __atomic_fetch_and_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw and i8* %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+define i8 @atomicrmw_and_i8_seq_cst(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_and_i8_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    call __atomic_fetch_and_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw and i8* %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i8 @atomicrmw_nand_i8_monotonic(i8* %a, i8 %b) {
+; RV32I-LABEL: atomicrmw_nand_i8_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    call __atomic_fetch_nand_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw nand i8* %a, i8 %b monotonic
+  ret i8 %1
+}
+
+define i8 @atomicrmw_nand_i8_acquire(i8* %a, i8 %b) {
+; RV32I-LABEL: atomicrmw_nand_i8_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    call __atomic_fetch_nand_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw nand i8* %a, i8 %b acquire
+  ret i8 %1
+}
+
+define i8 @atomicrmw_nand_i8_release(i8* %a, i8 %b) {
+; RV32I-LABEL: atomicrmw_nand_i8_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    call __atomic_fetch_nand_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw nand i8* %a, i8 %b release
+  ret i8 %1
+}
+
+define i8 @atomicrmw_nand_i8_acq_rel(i8* %a, i8 %b) {
+; RV32I-LABEL: atomicrmw_nand_i8_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    call __atomic_fetch_nand_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw nand i8* %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+define i8 @atomicrmw_nand_i8_seq_cst(i8* %a, i8 %b) {
+; RV32I-LABEL: atomicrmw_nand_i8_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    call __atomic_fetch_nand_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw nand i8* %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i8 @atomicrmw_or_i8_monotonic(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_or_i8_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    call __atomic_fetch_or_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw or i8* %a, i8 %b monotonic
+  ret i8 %1
+}
+
+define i8 @atomicrmw_or_i8_acquire(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_or_i8_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    call __atomic_fetch_or_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw or i8* %a, i8 %b acquire
+  ret i8 %1
+}
+
+define i8 @atomicrmw_or_i8_release(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_or_i8_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    call __atomic_fetch_or_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw or i8* %a, i8 %b release
+  ret i8 %1
+}
+
+define i8 @atomicrmw_or_i8_acq_rel(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_or_i8_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    call __atomic_fetch_or_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw or i8* %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+define i8 @atomicrmw_or_i8_seq_cst(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_or_i8_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    call __atomic_fetch_or_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw or i8* %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i8 @atomicrmw_xor_i8_monotonic(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xor_i8_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    call __atomic_fetch_xor_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xor i8* %a, i8 %b monotonic
+  ret i8 %1
+}
+
+define i8 @atomicrmw_xor_i8_acquire(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xor_i8_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    call __atomic_fetch_xor_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xor i8* %a, i8 %b acquire
+  ret i8 %1
+}
+
+define i8 @atomicrmw_xor_i8_release(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xor_i8_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    call __atomic_fetch_xor_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xor i8* %a, i8 %b release
+  ret i8 %1
+}
+
+define i8 @atomicrmw_xor_i8_acq_rel(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xor_i8_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    call __atomic_fetch_xor_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xor i8* %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+define i8 @atomicrmw_xor_i8_seq_cst(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xor_i8_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    call __atomic_fetch_xor_1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xor i8* %a, i8 %b seq_cst
+  ret i8 %1
+}
+
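+; There is no __atomic_fetch_max libcall, so AtomicExpandPass expands min/max
+; to a compare-exchange loop: load the old value, sign-extend both operands
+; (slli/srai by 24 for i8), pick the maximum with a branch, spill the expected
+; value to a stack slot, and call __atomic_compare_exchange_1(ptr, expected,
+; desired, success_order, failure_order) until it reports success. For acq_rel
+; the failure ordering is weakened to acquire (a3 = 4, a4 = 2); for release it
+; is monotonic (a4 = 0).
+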
+define i8 @atomicrmw_max_i8_monotonic(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_max_i8_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s4, a0
+; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    slli a1, a1, 24
+; RV32I-NEXT:    srai s1, a1, 24
+; RV32I-NEXT:    addi s3, sp, 11
+; RV32I-NEXT:  .LBB35_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    slli a1, a0, 24
+; RV32I-NEXT:    srai a1, a1, 24
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    blt s1, a1, .LBB35_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB35_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB35_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB35_1 Depth=1
+; RV32I-NEXT:    sb a0, 11(sp)
+; RV32I-NEXT:    mv a0, s4
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lb a0, 11(sp)
+; RV32I-NEXT:    beqz a1, .LBB35_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw max i8* %a, i8 %b monotonic
+  ret i8 %1
+}
+
+define i8 @atomicrmw_max_i8_acquire(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_max_i8_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s4, a0
+; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    slli a1, a1, 24
+; RV32I-NEXT:    srai s5, a1, 24
+; RV32I-NEXT:    addi s3, sp, 7
+; RV32I-NEXT:    addi s1, zero, 2
+; RV32I-NEXT:  .LBB36_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    slli a1, a0, 24
+; RV32I-NEXT:    srai a1, a1, 24
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    blt s5, a1, .LBB36_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB36_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB36_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB36_1 Depth=1
+; RV32I-NEXT:    sb a0, 7(sp)
+; RV32I-NEXT:    mv a0, s4
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:    mv a4, s1
+; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lb a0, 7(sp)
+; RV32I-NEXT:    beqz a1, .LBB36_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw max i8* %a, i8 %b acquire
+  ret i8 %1
+}
+
+define i8 @atomicrmw_max_i8_release(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_max_i8_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s5, a0
+; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    slli a1, a1, 24
+; RV32I-NEXT:    srai s1, a1, 24
+; RV32I-NEXT:    addi s3, sp, 7
+; RV32I-NEXT:    addi s4, zero, 3
+; RV32I-NEXT:  .LBB37_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    slli a1, a0, 24
+; RV32I-NEXT:    srai a1, a1, 24
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    blt s1, a1, .LBB37_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB37_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB37_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB37_1 Depth=1
+; RV32I-NEXT:    sb a0, 7(sp)
+; RV32I-NEXT:    mv a0, s5
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lb a0, 7(sp)
+; RV32I-NEXT:    beqz a1, .LBB37_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw max i8* %a, i8 %b release
+  ret i8 %1
+}
+
+define i8 @atomicrmw_max_i8_acq_rel(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_max_i8_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    sw s6, 4(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s6, a0
+; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    slli a1, a1, 24
+; RV32I-NEXT:    srai s1, a1, 24
+; RV32I-NEXT:    addi s3, sp, 3
+; RV32I-NEXT:    addi s4, zero, 4
+; RV32I-NEXT:    addi s5, zero, 2
+; RV32I-NEXT:  .LBB38_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    slli a1, a0, 24
+; RV32I-NEXT:    srai a1, a1, 24
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    blt s1, a1, .LBB38_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB38_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB38_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB38_1 Depth=1
+; RV32I-NEXT:    sb a0, 3(sp)
+; RV32I-NEXT:    mv a0, s6
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, s5
+; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lb a0, 3(sp)
+; RV32I-NEXT:    beqz a1, .LBB38_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s6, 4(sp)
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw max i8* %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+define i8 @atomicrmw_max_i8_seq_cst(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_max_i8_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s4, a0
+; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    slli a1, a1, 24
+; RV32I-NEXT:    srai s5, a1, 24
+; RV32I-NEXT:    addi s3, sp, 7
+; RV32I-NEXT:    addi s1, zero, 5
+; RV32I-NEXT:  .LBB39_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    slli a1, a0, 24
+; RV32I-NEXT:    srai a1, a1, 24
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    blt s5, a1, .LBB39_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB39_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB39_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB39_1 Depth=1
+; RV32I-NEXT:    sb a0, 7(sp)
+; RV32I-NEXT:    mv a0, s4
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:    mv a4, s1
+; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lb a0, 7(sp)
+; RV32I-NEXT:    beqz a1, .LBB39_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw max i8* %a, i8 %b seq_cst
+  ret i8 %1
+}
+
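+; atomicrmw min uses the same loop with the branch condition inverted
+; (bge rather than blt) so that the smaller value is kept.
+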
+define i8 @atomicrmw_min_i8_monotonic(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_min_i8_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s4, a0
+; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    slli a1, a1, 24
+; RV32I-NEXT:    srai s1, a1, 24
+; RV32I-NEXT:    addi s3, sp, 11
+; RV32I-NEXT:  .LBB40_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    slli a1, a0, 24
+; RV32I-NEXT:    srai a1, a1, 24
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bge s1, a1, .LBB40_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB40_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB40_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB40_1 Depth=1
+; RV32I-NEXT:    sb a0, 11(sp)
+; RV32I-NEXT:    mv a0, s4
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lb a0, 11(sp)
+; RV32I-NEXT:    beqz a1, .LBB40_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw min i8* %a, i8 %b monotonic
+  ret i8 %1
+}
+
+define i8 @atomicrmw_min_i8_acquire(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_min_i8_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s4, a0
+; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    slli a1, a1, 24
+; RV32I-NEXT:    srai s5, a1, 24
+; RV32I-NEXT:    addi s3, sp, 7
+; RV32I-NEXT:    addi s1, zero, 2
+; RV32I-NEXT:  .LBB41_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    slli a1, a0, 24
+; RV32I-NEXT:    srai a1, a1, 24
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bge s5, a1, .LBB41_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB41_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB41_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB41_1 Depth=1
+; RV32I-NEXT:    sb a0, 7(sp)
+; RV32I-NEXT:    mv a0, s4
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:    mv a4, s1
+; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lb a0, 7(sp)
+; RV32I-NEXT:    beqz a1, .LBB41_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw min i8* %a, i8 %b acquire
+  ret i8 %1
+}
+
+define i8 @atomicrmw_min_i8_release(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_min_i8_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s5, a0
+; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    slli a1, a1, 24
+; RV32I-NEXT:    srai s1, a1, 24
+; RV32I-NEXT:    addi s3, sp, 7
+; RV32I-NEXT:    addi s4, zero, 3
+; RV32I-NEXT:  .LBB42_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    slli a1, a0, 24
+; RV32I-NEXT:    srai a1, a1, 24
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bge s1, a1, .LBB42_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB42_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB42_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB42_1 Depth=1
+; RV32I-NEXT:    sb a0, 7(sp)
+; RV32I-NEXT:    mv a0, s5
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lb a0, 7(sp)
+; RV32I-NEXT:    beqz a1, .LBB42_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw min i8* %a, i8 %b release
+  ret i8 %1
+}
+
+define i8 @atomicrmw_min_i8_acq_rel(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_min_i8_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    sw s6, 4(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s6, a0
+; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    slli a1, a1, 24
+; RV32I-NEXT:    srai s1, a1, 24
+; RV32I-NEXT:    addi s3, sp, 3
+; RV32I-NEXT:    addi s4, zero, 4
+; RV32I-NEXT:    addi s5, zero, 2
+; RV32I-NEXT:  .LBB43_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    slli a1, a0, 24
+; RV32I-NEXT:    srai a1, a1, 24
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bge s1, a1, .LBB43_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB43_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB43_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB43_1 Depth=1
+; RV32I-NEXT:    sb a0, 3(sp)
+; RV32I-NEXT:    mv a0, s6
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, s5
+; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lb a0, 3(sp)
+; RV32I-NEXT:    beqz a1, .LBB43_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s6, 4(sp)
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw min i8* %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+define i8 @atomicrmw_min_i8_seq_cst(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_min_i8_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s4, a0
+; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    slli a1, a1, 24
+; RV32I-NEXT:    srai s5, a1, 24
+; RV32I-NEXT:    addi s3, sp, 7
+; RV32I-NEXT:    addi s1, zero, 5
+; RV32I-NEXT:  .LBB44_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    slli a1, a0, 24
+; RV32I-NEXT:    srai a1, a1, 24
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bge s5, a1, .LBB44_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB44_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB44_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB44_1 Depth=1
+; RV32I-NEXT:    sb a0, 7(sp)
+; RV32I-NEXT:    mv a0, s4
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:    mv a4, s1
+; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lb a0, 7(sp)
+; RV32I-NEXT:    beqz a1, .LBB44_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw min i8* %a, i8 %b seq_cst
+  ret i8 %1
+}
+
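+; The unsigned variants zero-extend with "andi ..., 255" instead of a shift
+; pair and compare with bltu (umax) or bgeu (umin); the loop structure is
+; otherwise identical to the signed min/max expansions above.
+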
+define i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umax_i8_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s4, a0
+; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    andi s1, a1, 255
+; RV32I-NEXT:    addi s3, sp, 11
+; RV32I-NEXT:  .LBB45_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    andi a1, a0, 255
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bltu s1, a1, .LBB45_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB45_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB45_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB45_1 Depth=1
+; RV32I-NEXT:    sb a0, 11(sp)
+; RV32I-NEXT:    mv a0, s4
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lb a0, 11(sp)
+; RV32I-NEXT:    beqz a1, .LBB45_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umax i8* %a, i8 %b monotonic
+  ret i8 %1
+}
+
+define i8 @atomicrmw_umax_i8_acquire(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umax_i8_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s4, a0
+; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    andi s5, a1, 255
+; RV32I-NEXT:    addi s3, sp, 7
+; RV32I-NEXT:    addi s1, zero, 2
+; RV32I-NEXT:  .LBB46_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    andi a1, a0, 255
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bltu s5, a1, .LBB46_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB46_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB46_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB46_1 Depth=1
+; RV32I-NEXT:    sb a0, 7(sp)
+; RV32I-NEXT:    mv a0, s4
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:    mv a4, s1
+; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lb a0, 7(sp)
+; RV32I-NEXT:    beqz a1, .LBB46_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umax i8* %a, i8 %b acquire
+  ret i8 %1
+}
+
+define i8 @atomicrmw_umax_i8_release(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umax_i8_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s5, a0
+; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    andi s1, a1, 255
+; RV32I-NEXT:    addi s3, sp, 7
+; RV32I-NEXT:    addi s4, zero, 3
+; RV32I-NEXT:  .LBB47_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    andi a1, a0, 255
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bltu s1, a1, .LBB47_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB47_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB47_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB47_1 Depth=1
+; RV32I-NEXT:    sb a0, 7(sp)
+; RV32I-NEXT:    mv a0, s5
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lb a0, 7(sp)
+; RV32I-NEXT:    beqz a1, .LBB47_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umax i8* %a, i8 %b release
+  ret i8 %1
+}
+
+define i8 @atomicrmw_umax_i8_acq_rel(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umax_i8_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    sw s6, 4(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s6, a0
+; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    andi s1, a1, 255
+; RV32I-NEXT:    addi s3, sp, 3
+; RV32I-NEXT:    addi s4, zero, 4
+; RV32I-NEXT:    addi s5, zero, 2
+; RV32I-NEXT:  .LBB48_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    andi a1, a0, 255
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bltu s1, a1, .LBB48_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB48_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB48_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB48_1 Depth=1
+; RV32I-NEXT:    sb a0, 3(sp)
+; RV32I-NEXT:    mv a0, s6
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, s5
+; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lb a0, 3(sp)
+; RV32I-NEXT:    beqz a1, .LBB48_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s6, 4(sp)
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umax i8* %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+define i8 @atomicrmw_umax_i8_seq_cst(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umax_i8_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s4, a0
+; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    andi s5, a1, 255
+; RV32I-NEXT:    addi s3, sp, 7
+; RV32I-NEXT:    addi s1, zero, 5
+; RV32I-NEXT:  .LBB49_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    andi a1, a0, 255
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bltu s5, a1, .LBB49_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB49_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB49_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB49_1 Depth=1
+; RV32I-NEXT:    sb a0, 7(sp)
+; RV32I-NEXT:    mv a0, s4
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:    mv a4, s1
+; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lb a0, 7(sp)
+; RV32I-NEXT:    beqz a1, .LBB49_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umax i8* %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umin_i8_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s4, a0
+; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    andi s1, a1, 255
+; RV32I-NEXT:    addi s3, sp, 11
+; RV32I-NEXT:  .LBB50_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    andi a1, a0, 255
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bgeu s1, a1, .LBB50_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB50_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB50_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB50_1 Depth=1
+; RV32I-NEXT:    sb a0, 11(sp)
+; RV32I-NEXT:    mv a0, s4
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lb a0, 11(sp)
+; RV32I-NEXT:    beqz a1, .LBB50_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umin i8* %a, i8 %b monotonic
+  ret i8 %1
+}
+
+define i8 @atomicrmw_umin_i8_acquire(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umin_i8_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s4, a0
+; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    andi s5, a1, 255
+; RV32I-NEXT:    addi s3, sp, 7
+; RV32I-NEXT:    addi s1, zero, 2
+; RV32I-NEXT:  .LBB51_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    andi a1, a0, 255
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bgeu s5, a1, .LBB51_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB51_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB51_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB51_1 Depth=1
+; RV32I-NEXT:    sb a0, 7(sp)
+; RV32I-NEXT:    mv a0, s4
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:    mv a4, s1
+; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lb a0, 7(sp)
+; RV32I-NEXT:    beqz a1, .LBB51_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umin i8* %a, i8 %b acquire
+  ret i8 %1
+}
+
+define i8 @atomicrmw_umin_i8_release(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umin_i8_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s5, a0
+; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    andi s1, a1, 255
+; RV32I-NEXT:    addi s3, sp, 7
+; RV32I-NEXT:    addi s4, zero, 3
+; RV32I-NEXT:  .LBB52_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    andi a1, a0, 255
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bgeu s1, a1, .LBB52_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB52_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB52_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB52_1 Depth=1
+; RV32I-NEXT:    sb a0, 7(sp)
+; RV32I-NEXT:    mv a0, s5
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lb a0, 7(sp)
+; RV32I-NEXT:    beqz a1, .LBB52_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umin i8* %a, i8 %b release
+  ret i8 %1
+}
+
+define i8 @atomicrmw_umin_i8_acq_rel(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umin_i8_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    sw s6, 4(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s6, a0
+; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    andi s1, a1, 255
+; RV32I-NEXT:    addi s3, sp, 3
+; RV32I-NEXT:    addi s4, zero, 4
+; RV32I-NEXT:    addi s5, zero, 2
+; RV32I-NEXT:  .LBB53_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    andi a1, a0, 255
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bgeu s1, a1, .LBB53_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB53_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB53_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB53_1 Depth=1
+; RV32I-NEXT:    sb a0, 3(sp)
+; RV32I-NEXT:    mv a0, s6
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, s5
+; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lb a0, 3(sp)
+; RV32I-NEXT:    beqz a1, .LBB53_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s6, 4(sp)
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umin i8* %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+define i8 @atomicrmw_umin_i8_seq_cst(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umin_i8_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s4, a0
+; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:    andi s5, a1, 255
+; RV32I-NEXT:    addi s3, sp, 7
+; RV32I-NEXT:    addi s1, zero, 5
+; RV32I-NEXT:  .LBB54_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    andi a1, a0, 255
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bgeu s5, a1, .LBB54_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB54_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB54_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB54_1 Depth=1
+; RV32I-NEXT:    sb a0, 7(sp)
+; RV32I-NEXT:    mv a0, s4
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:    mv a4, s1
+; RV32I-NEXT:    call __atomic_compare_exchange_1
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lb a0, 7(sp)
+; RV32I-NEXT:    beqz a1, .LBB54_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umin i8* %a, i8 %b seq_cst
+  ret i8 %1
+}
+
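+; The i16 tests follow the same pattern as the i8 tests but use the halfword
+; ("_2") libcalls, e.g. __atomic_exchange_2 and __atomic_fetch_add_2.
+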
+define i16 @atomicrmw_xchg_i16_monotonic(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xchg_i16_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    call __atomic_exchange_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xchg i16* %a, i16 %b monotonic
+  ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_i16_acquire(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xchg_i16_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    call __atomic_exchange_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xchg i16* %a, i16 %b acquire
+  ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_i16_release(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xchg_i16_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    call __atomic_exchange_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xchg i16* %a, i16 %b release
+  ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_i16_acq_rel(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xchg_i16_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    call __atomic_exchange_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xchg i16* %a, i16 %b acq_rel
+  ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_i16_seq_cst(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xchg_i16_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    call __atomic_exchange_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xchg i16* %a, i16 %b seq_cst
+  ret i16 %1
+}
+
+define i16 @atomicrmw_add_i16_monotonic(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_add_i16_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    call __atomic_fetch_add_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw add i16* %a, i16 %b monotonic
+  ret i16 %1
+}
+
+define i16 @atomicrmw_add_i16_acquire(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_add_i16_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    call __atomic_fetch_add_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw add i16* %a, i16 %b acquire
+  ret i16 %1
+}
+
+define i16 @atomicrmw_add_i16_release(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_add_i16_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    call __atomic_fetch_add_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw add i16* %a, i16 %b release
+  ret i16 %1
+}
+
+define i16 @atomicrmw_add_i16_acq_rel(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_add_i16_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    call __atomic_fetch_add_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw add i16* %a, i16 %b acq_rel
+  ret i16 %1
+}
+
+define i16 @atomicrmw_add_i16_seq_cst(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_add_i16_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    call __atomic_fetch_add_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw add i16* %a, i16 %b seq_cst
+  ret i16 %1
+}
+
+define i16 @atomicrmw_sub_i16_monotonic(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_sub_i16_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    call __atomic_fetch_sub_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw sub i16* %a, i16 %b monotonic
+  ret i16 %1
+}
+
+define i16 @atomicrmw_sub_i16_acquire(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_sub_i16_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    call __atomic_fetch_sub_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw sub i16* %a, i16 %b acquire
+  ret i16 %1
+}
+
+define i16 @atomicrmw_sub_i16_release(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_sub_i16_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    call __atomic_fetch_sub_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw sub i16* %a, i16 %b release
+  ret i16 %1
+}
+
+define i16 @atomicrmw_sub_i16_acq_rel(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_sub_i16_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    call __atomic_fetch_sub_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw sub i16* %a, i16 %b acq_rel
+  ret i16 %1
+}
+
+define i16 @atomicrmw_sub_i16_seq_cst(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_sub_i16_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    call __atomic_fetch_sub_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw sub i16* %a, i16 %b seq_cst
+  ret i16 %1
+}
+
+define i16 @atomicrmw_and_i16_monotonic(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_and_i16_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    call __atomic_fetch_and_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw and i16* %a, i16 %b monotonic
+  ret i16 %1
+}
+
+define i16 @atomicrmw_and_i16_acquire(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_and_i16_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    call __atomic_fetch_and_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw and i16* %a, i16 %b acquire
+  ret i16 %1
+}
+
+define i16 @atomicrmw_and_i16_release(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_and_i16_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    call __atomic_fetch_and_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw and i16* %a, i16 %b release
+  ret i16 %1
+}
+
+define i16 @atomicrmw_and_i16_acq_rel(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_and_i16_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    call __atomic_fetch_and_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw and i16* %a, i16 %b acq_rel
+  ret i16 %1
+}
+
+define i16 @atomicrmw_and_i16_seq_cst(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_and_i16_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    call __atomic_fetch_and_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw and i16* %a, i16 %b seq_cst
+  ret i16 %1
+}
+
+define i16 @atomicrmw_nand_i16_monotonic(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_nand_i16_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    call __atomic_fetch_nand_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw nand i16* %a, i16 %b monotonic
+  ret i16 %1
+}
+
+define i16 @atomicrmw_nand_i16_acquire(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_nand_i16_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    call __atomic_fetch_nand_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw nand i16* %a, i16 %b acquire
+  ret i16 %1
+}
+
+define i16 @atomicrmw_nand_i16_release(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_nand_i16_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    call __atomic_fetch_nand_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw nand i16* %a, i16 %b release
+  ret i16 %1
+}
+
+define i16 @atomicrmw_nand_i16_acq_rel(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_nand_i16_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    call __atomic_fetch_nand_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw nand i16* %a, i16 %b acq_rel
+  ret i16 %1
+}
+
+define i16 @atomicrmw_nand_i16_seq_cst(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_nand_i16_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    call __atomic_fetch_nand_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw nand i16* %a, i16 %b seq_cst
+  ret i16 %1
+}
+
+define i16 @atomicrmw_or_i16_monotonic(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_or_i16_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    call __atomic_fetch_or_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw or i16* %a, i16 %b monotonic
+  ret i16 %1
+}
+
+define i16 @atomicrmw_or_i16_acquire(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_or_i16_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    call __atomic_fetch_or_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw or i16* %a, i16 %b acquire
+  ret i16 %1
+}
+
+define i16 @atomicrmw_or_i16_release(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_or_i16_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    call __atomic_fetch_or_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw or i16* %a, i16 %b release
+  ret i16 %1
+}
+
+define i16 @atomicrmw_or_i16_acq_rel(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_or_i16_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    call __atomic_fetch_or_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw or i16* %a, i16 %b acq_rel
+  ret i16 %1
+}
+
+define i16 @atomicrmw_or_i16_seq_cst(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_or_i16_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    call __atomic_fetch_or_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw or i16* %a, i16 %b seq_cst
+  ret i16 %1
+}
+
+define i16 @atomicrmw_xor_i16_monotonic(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xor_i16_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    call __atomic_fetch_xor_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xor i16* %a, i16 %b monotonic
+  ret i16 %1
+}
+
+define i16 @atomicrmw_xor_i16_acquire(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xor_i16_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    call __atomic_fetch_xor_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xor i16* %a, i16 %b acquire
+  ret i16 %1
+}
+
+define i16 @atomicrmw_xor_i16_release(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xor_i16_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    call __atomic_fetch_xor_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xor i16* %a, i16 %b release
+  ret i16 %1
+}
+
+define i16 @atomicrmw_xor_i16_acq_rel(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xor_i16_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    call __atomic_fetch_xor_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xor i16* %a, i16 %b acq_rel
+  ret i16 %1
+}
+
+define i16 @atomicrmw_xor_i16_seq_cst(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xor_i16_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    call __atomic_fetch_xor_2
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xor i16* %a, i16 %b seq_cst
+  ret i16 %1
+}
+
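+; The i16 min/max loops sign-extend with slli/srai by 16, spill with sh/lh,
+; and loop on __atomic_compare_exchange_2; the memory-order arguments are
+; passed exactly as in the i8 expansions.
+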
+define i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_max_i16_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s4, a0
+; RV32I-NEXT:    lhu a0, 0(a0)
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srai s1, a1, 16
+; RV32I-NEXT:    addi s3, sp, 10
+; RV32I-NEXT:  .LBB90_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    slli a1, a0, 16
+; RV32I-NEXT:    srai a1, a1, 16
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    blt s1, a1, .LBB90_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB90_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB90_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB90_1 Depth=1
+; RV32I-NEXT:    sh a0, 10(sp)
+; RV32I-NEXT:    mv a0, s4
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lh a0, 10(sp)
+; RV32I-NEXT:    beqz a1, .LBB90_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw max i16* %a, i16 %b monotonic
+  ret i16 %1
+}
+
+define i16 @atomicrmw_max_i16_acquire(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_max_i16_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s4, a0
+; RV32I-NEXT:    lhu a0, 0(a0)
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srai s5, a1, 16
+; RV32I-NEXT:    addi s3, sp, 6
+; RV32I-NEXT:    addi s1, zero, 2
+; RV32I-NEXT:  .LBB91_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    slli a1, a0, 16
+; RV32I-NEXT:    srai a1, a1, 16
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    blt s5, a1, .LBB91_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB91_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB91_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB91_1 Depth=1
+; RV32I-NEXT:    sh a0, 6(sp)
+; RV32I-NEXT:    mv a0, s4
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:    mv a4, s1
+; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lh a0, 6(sp)
+; RV32I-NEXT:    beqz a1, .LBB91_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw max i16* %a, i16 %b acquire
+  ret i16 %1
+}
+
+define i16 @atomicrmw_max_i16_release(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_max_i16_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s5, a0
+; RV32I-NEXT:    lhu a0, 0(a0)
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srai s1, a1, 16
+; RV32I-NEXT:    addi s3, sp, 6
+; RV32I-NEXT:    addi s4, zero, 3
+; RV32I-NEXT:  .LBB92_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    slli a1, a0, 16
+; RV32I-NEXT:    srai a1, a1, 16
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    blt s1, a1, .LBB92_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB92_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB92_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB92_1 Depth=1
+; RV32I-NEXT:    sh a0, 6(sp)
+; RV32I-NEXT:    mv a0, s5
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lh a0, 6(sp)
+; RV32I-NEXT:    beqz a1, .LBB92_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw max i16* %a, i16 %b release
+  ret i16 %1
+}
+
+define i16 @atomicrmw_max_i16_acq_rel(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_max_i16_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    sw s6, 4(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s6, a0
+; RV32I-NEXT:    lhu a0, 0(a0)
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srai s1, a1, 16
+; RV32I-NEXT:    addi s3, sp, 2
+; RV32I-NEXT:    addi s4, zero, 4
+; RV32I-NEXT:    addi s5, zero, 2
+; RV32I-NEXT:  .LBB93_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    slli a1, a0, 16
+; RV32I-NEXT:    srai a1, a1, 16
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    blt s1, a1, .LBB93_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB93_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB93_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB93_1 Depth=1
+; RV32I-NEXT:    sh a0, 2(sp)
+; RV32I-NEXT:    mv a0, s6
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, s5
+; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lh a0, 2(sp)
+; RV32I-NEXT:    beqz a1, .LBB93_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s6, 4(sp)
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw max i16* %a, i16 %b acq_rel
+  ret i16 %1
+}
+
+define i16 @atomicrmw_max_i16_seq_cst(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_max_i16_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s4, a0
+; RV32I-NEXT:    lhu a0, 0(a0)
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srai s5, a1, 16
+; RV32I-NEXT:    addi s3, sp, 6
+; RV32I-NEXT:    addi s1, zero, 5
+; RV32I-NEXT:  .LBB94_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    slli a1, a0, 16
+; RV32I-NEXT:    srai a1, a1, 16
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    blt s5, a1, .LBB94_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB94_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB94_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB94_1 Depth=1
+; RV32I-NEXT:    sh a0, 6(sp)
+; RV32I-NEXT:    mv a0, s4
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:    mv a4, s1
+; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lh a0, 6(sp)
+; RV32I-NEXT:    beqz a1, .LBB94_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw max i16* %a, i16 %b seq_cst
+  ret i16 %1
+}
+
+define i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_min_i16_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s4, a0
+; RV32I-NEXT:    lhu a0, 0(a0)
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srai s1, a1, 16
+; RV32I-NEXT:    addi s3, sp, 10
+; RV32I-NEXT:  .LBB95_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    slli a1, a0, 16
+; RV32I-NEXT:    srai a1, a1, 16
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bge s1, a1, .LBB95_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB95_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB95_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB95_1 Depth=1
+; RV32I-NEXT:    sh a0, 10(sp)
+; RV32I-NEXT:    mv a0, s4
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lh a0, 10(sp)
+; RV32I-NEXT:    beqz a1, .LBB95_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw min i16* %a, i16 %b monotonic
+  ret i16 %1
+}
+
+define i16 @atomicrmw_min_i16_acquire(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_min_i16_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s4, a0
+; RV32I-NEXT:    lhu a0, 0(a0)
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srai s5, a1, 16
+; RV32I-NEXT:    addi s3, sp, 6
+; RV32I-NEXT:    addi s1, zero, 2
+; RV32I-NEXT:  .LBB96_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    slli a1, a0, 16
+; RV32I-NEXT:    srai a1, a1, 16
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bge s5, a1, .LBB96_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB96_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB96_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB96_1 Depth=1
+; RV32I-NEXT:    sh a0, 6(sp)
+; RV32I-NEXT:    mv a0, s4
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:    mv a4, s1
+; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lh a0, 6(sp)
+; RV32I-NEXT:    beqz a1, .LBB96_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw min i16* %a, i16 %b acquire
+  ret i16 %1
+}
+
+define i16 @atomicrmw_min_i16_release(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_min_i16_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s5, a0
+; RV32I-NEXT:    lhu a0, 0(a0)
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srai s1, a1, 16
+; RV32I-NEXT:    addi s3, sp, 6
+; RV32I-NEXT:    addi s4, zero, 3
+; RV32I-NEXT:  .LBB97_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    slli a1, a0, 16
+; RV32I-NEXT:    srai a1, a1, 16
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bge s1, a1, .LBB97_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB97_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB97_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB97_1 Depth=1
+; RV32I-NEXT:    sh a0, 6(sp)
+; RV32I-NEXT:    mv a0, s5
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lh a0, 6(sp)
+; RV32I-NEXT:    beqz a1, .LBB97_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw min i16* %a, i16 %b release
+  ret i16 %1
+}
+
+define i16 @atomicrmw_min_i16_acq_rel(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_min_i16_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    sw s6, 4(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s6, a0
+; RV32I-NEXT:    lhu a0, 0(a0)
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srai s1, a1, 16
+; RV32I-NEXT:    addi s3, sp, 2
+; RV32I-NEXT:    addi s4, zero, 4
+; RV32I-NEXT:    addi s5, zero, 2
+; RV32I-NEXT:  .LBB98_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    slli a1, a0, 16
+; RV32I-NEXT:    srai a1, a1, 16
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bge s1, a1, .LBB98_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB98_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB98_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB98_1 Depth=1
+; RV32I-NEXT:    sh a0, 2(sp)
+; RV32I-NEXT:    mv a0, s6
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, s5
+; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lh a0, 2(sp)
+; RV32I-NEXT:    beqz a1, .LBB98_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s6, 4(sp)
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw min i16* %a, i16 %b acq_rel
+  ret i16 %1
+}
+
+define i16 @atomicrmw_min_i16_seq_cst(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_min_i16_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s4, a0
+; RV32I-NEXT:    lhu a0, 0(a0)
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srai s5, a1, 16
+; RV32I-NEXT:    addi s3, sp, 6
+; RV32I-NEXT:    addi s1, zero, 5
+; RV32I-NEXT:  .LBB99_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    slli a1, a0, 16
+; RV32I-NEXT:    srai a1, a1, 16
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bge s5, a1, .LBB99_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB99_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB99_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB99_1 Depth=1
+; RV32I-NEXT:    sh a0, 6(sp)
+; RV32I-NEXT:    mv a0, s4
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:    mv a4, s1
+; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lh a0, 6(sp)
+; RV32I-NEXT:    beqz a1, .LBB99_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw min i16* %a, i16 %b seq_cst
+  ret i16 %1
+}
+
+define i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umax_i16_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s4, a0
+; RV32I-NEXT:    lhu a0, 0(a0)
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    addi s1, a1, -1
+; RV32I-NEXT:    and s5, s2, s1
+; RV32I-NEXT:    addi s3, sp, 6
+; RV32I-NEXT:  .LBB100_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    and a1, a0, s1
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bltu s5, a1, .LBB100_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB100_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB100_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB100_1 Depth=1
+; RV32I-NEXT:    sh a0, 6(sp)
+; RV32I-NEXT:    mv a0, s4
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lh a0, 6(sp)
+; RV32I-NEXT:    beqz a1, .LBB100_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umax i16* %a, i16 %b monotonic
+  ret i16 %1
+}
+
+define i16 @atomicrmw_umax_i16_acquire(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umax_i16_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    sw s6, 4(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s4, a0
+; RV32I-NEXT:    lhu a0, 0(a0)
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    addi s5, a1, -1
+; RV32I-NEXT:    and s6, s2, s5
+; RV32I-NEXT:    addi s3, sp, 2
+; RV32I-NEXT:    addi s1, zero, 2
+; RV32I-NEXT:  .LBB101_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    and a1, a0, s5
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bltu s6, a1, .LBB101_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB101_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB101_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB101_1 Depth=1
+; RV32I-NEXT:    sh a0, 2(sp)
+; RV32I-NEXT:    mv a0, s4
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:    mv a4, s1
+; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lh a0, 2(sp)
+; RV32I-NEXT:    beqz a1, .LBB101_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s6, 4(sp)
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umax i16* %a, i16 %b acquire
+  ret i16 %1
+}
+
+define i16 @atomicrmw_umax_i16_release(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umax_i16_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    sw s6, 4(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s5, a0
+; RV32I-NEXT:    lhu a0, 0(a0)
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    addi s1, a1, -1
+; RV32I-NEXT:    and s6, s2, s1
+; RV32I-NEXT:    addi s3, sp, 2
+; RV32I-NEXT:    addi s4, zero, 3
+; RV32I-NEXT:  .LBB102_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    and a1, a0, s1
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bltu s6, a1, .LBB102_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB102_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB102_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB102_1 Depth=1
+; RV32I-NEXT:    sh a0, 2(sp)
+; RV32I-NEXT:    mv a0, s5
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lh a0, 2(sp)
+; RV32I-NEXT:    beqz a1, .LBB102_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s6, 4(sp)
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umax i16* %a, i16 %b release
+  ret i16 %1
+}
+
+define i16 @atomicrmw_umax_i16_acq_rel(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umax_i16_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -48
+; RV32I-NEXT:    sw ra, 44(sp)
+; RV32I-NEXT:    sw s1, 40(sp)
+; RV32I-NEXT:    sw s2, 36(sp)
+; RV32I-NEXT:    sw s3, 32(sp)
+; RV32I-NEXT:    sw s4, 28(sp)
+; RV32I-NEXT:    sw s5, 24(sp)
+; RV32I-NEXT:    sw s6, 20(sp)
+; RV32I-NEXT:    sw s7, 16(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s6, a0
+; RV32I-NEXT:    lhu a0, 0(a0)
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    addi s1, a1, -1
+; RV32I-NEXT:    and s7, s2, s1
+; RV32I-NEXT:    addi s3, sp, 14
+; RV32I-NEXT:    addi s4, zero, 4
+; RV32I-NEXT:    addi s5, zero, 2
+; RV32I-NEXT:  .LBB103_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    and a1, a0, s1
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bltu s7, a1, .LBB103_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB103_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB103_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB103_1 Depth=1
+; RV32I-NEXT:    sh a0, 14(sp)
+; RV32I-NEXT:    mv a0, s6
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, s5
+; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lh a0, 14(sp)
+; RV32I-NEXT:    beqz a1, .LBB103_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s7, 16(sp)
+; RV32I-NEXT:    lw s6, 20(sp)
+; RV32I-NEXT:    lw s5, 24(sp)
+; RV32I-NEXT:    lw s4, 28(sp)
+; RV32I-NEXT:    lw s3, 32(sp)
+; RV32I-NEXT:    lw s2, 36(sp)
+; RV32I-NEXT:    lw s1, 40(sp)
+; RV32I-NEXT:    lw ra, 44(sp)
+; RV32I-NEXT:    addi sp, sp, 48
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umax i16* %a, i16 %b acq_rel
+  ret i16 %1
+}
+
+define i16 @atomicrmw_umax_i16_seq_cst(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umax_i16_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    sw s6, 4(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s4, a0
+; RV32I-NEXT:    lhu a0, 0(a0)
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    addi s5, a1, -1
+; RV32I-NEXT:    and s6, s2, s5
+; RV32I-NEXT:    addi s3, sp, 2
+; RV32I-NEXT:    addi s1, zero, 5
+; RV32I-NEXT:  .LBB104_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    and a1, a0, s5
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bltu s6, a1, .LBB104_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB104_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB104_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB104_1 Depth=1
+; RV32I-NEXT:    sh a0, 2(sp)
+; RV32I-NEXT:    mv a0, s4
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:    mv a4, s1
+; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lh a0, 2(sp)
+; RV32I-NEXT:    beqz a1, .LBB104_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s6, 4(sp)
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umax i16* %a, i16 %b seq_cst
+  ret i16 %1
+}
+
+define i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umin_i16_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s4, a0
+; RV32I-NEXT:    lhu a0, 0(a0)
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    addi s1, a1, -1
+; RV32I-NEXT:    and s5, s2, s1
+; RV32I-NEXT:    addi s3, sp, 6
+; RV32I-NEXT:  .LBB105_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    and a1, a0, s1
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bgeu s5, a1, .LBB105_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB105_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB105_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB105_1 Depth=1
+; RV32I-NEXT:    sh a0, 6(sp)
+; RV32I-NEXT:    mv a0, s4
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lh a0, 6(sp)
+; RV32I-NEXT:    beqz a1, .LBB105_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umin i16* %a, i16 %b monotonic
+  ret i16 %1
+}
+
+define i16 @atomicrmw_umin_i16_acquire(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umin_i16_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    sw s6, 4(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s4, a0
+; RV32I-NEXT:    lhu a0, 0(a0)
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    addi s5, a1, -1
+; RV32I-NEXT:    and s6, s2, s5
+; RV32I-NEXT:    addi s3, sp, 2
+; RV32I-NEXT:    addi s1, zero, 2
+; RV32I-NEXT:  .LBB106_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    and a1, a0, s5
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bgeu s6, a1, .LBB106_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB106_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB106_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB106_1 Depth=1
+; RV32I-NEXT:    sh a0, 2(sp)
+; RV32I-NEXT:    mv a0, s4
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:    mv a4, s1
+; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lh a0, 2(sp)
+; RV32I-NEXT:    beqz a1, .LBB106_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s6, 4(sp)
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umin i16* %a, i16 %b acquire
+  ret i16 %1
+}
+
+define i16 @atomicrmw_umin_i16_release(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umin_i16_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    sw s6, 4(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s5, a0
+; RV32I-NEXT:    lhu a0, 0(a0)
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    addi s1, a1, -1
+; RV32I-NEXT:    and s6, s2, s1
+; RV32I-NEXT:    addi s3, sp, 2
+; RV32I-NEXT:    addi s4, zero, 3
+; RV32I-NEXT:  .LBB107_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    and a1, a0, s1
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bgeu s6, a1, .LBB107_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB107_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB107_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB107_1 Depth=1
+; RV32I-NEXT:    sh a0, 2(sp)
+; RV32I-NEXT:    mv a0, s5
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lh a0, 2(sp)
+; RV32I-NEXT:    beqz a1, .LBB107_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s6, 4(sp)
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umin i16* %a, i16 %b release
+  ret i16 %1
+}
+
+define i16 @atomicrmw_umin_i16_acq_rel(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umin_i16_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -48
+; RV32I-NEXT:    sw ra, 44(sp)
+; RV32I-NEXT:    sw s1, 40(sp)
+; RV32I-NEXT:    sw s2, 36(sp)
+; RV32I-NEXT:    sw s3, 32(sp)
+; RV32I-NEXT:    sw s4, 28(sp)
+; RV32I-NEXT:    sw s5, 24(sp)
+; RV32I-NEXT:    sw s6, 20(sp)
+; RV32I-NEXT:    sw s7, 16(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s6, a0
+; RV32I-NEXT:    lhu a0, 0(a0)
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    addi s1, a1, -1
+; RV32I-NEXT:    and s7, s2, s1
+; RV32I-NEXT:    addi s3, sp, 14
+; RV32I-NEXT:    addi s4, zero, 4
+; RV32I-NEXT:    addi s5, zero, 2
+; RV32I-NEXT:  .LBB108_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    and a1, a0, s1
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bgeu s7, a1, .LBB108_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB108_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB108_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB108_1 Depth=1
+; RV32I-NEXT:    sh a0, 14(sp)
+; RV32I-NEXT:    mv a0, s6
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, s5
+; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lh a0, 14(sp)
+; RV32I-NEXT:    beqz a1, .LBB108_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s7, 16(sp)
+; RV32I-NEXT:    lw s6, 20(sp)
+; RV32I-NEXT:    lw s5, 24(sp)
+; RV32I-NEXT:    lw s4, 28(sp)
+; RV32I-NEXT:    lw s3, 32(sp)
+; RV32I-NEXT:    lw s2, 36(sp)
+; RV32I-NEXT:    lw s1, 40(sp)
+; RV32I-NEXT:    lw ra, 44(sp)
+; RV32I-NEXT:    addi sp, sp, 48
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umin i16* %a, i16 %b acq_rel
+  ret i16 %1
+}
+
+define i16 @atomicrmw_umin_i16_seq_cst(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umin_i16_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    sw s6, 4(sp)
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s4, a0
+; RV32I-NEXT:    lhu a0, 0(a0)
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    addi s5, a1, -1
+; RV32I-NEXT:    and s6, s2, s5
+; RV32I-NEXT:    addi s3, sp, 2
+; RV32I-NEXT:    addi s1, zero, 5
+; RV32I-NEXT:  .LBB109_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    and a1, a0, s5
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bgeu s6, a1, .LBB109_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB109_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB109_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB109_1 Depth=1
+; RV32I-NEXT:    sh a0, 2(sp)
+; RV32I-NEXT:    mv a0, s4
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:    mv a4, s1
+; RV32I-NEXT:    call __atomic_compare_exchange_2
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    lh a0, 2(sp)
+; RV32I-NEXT:    beqz a1, .LBB109_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    lw s6, 4(sp)
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umin i16* %a, i16 %b seq_cst
+  ret i16 %1
+}
+
+define i32 @atomicrmw_xchg_i32_monotonic(i32* %a, i32 %b) {
+; RV32I-LABEL: atomicrmw_xchg_i32_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    call __atomic_exchange_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xchg i32* %a, i32 %b monotonic
+  ret i32 %1
+}
+
+define i32 @atomicrmw_xchg_i32_acquire(i32* %a, i32 %b) {
+; RV32I-LABEL: atomicrmw_xchg_i32_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    call __atomic_exchange_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xchg i32* %a, i32 %b acquire
+  ret i32 %1
+}
+
+define i32 @atomicrmw_xchg_i32_release(i32* %a, i32 %b) {
+; RV32I-LABEL: atomicrmw_xchg_i32_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    call __atomic_exchange_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xchg i32* %a, i32 %b release
+  ret i32 %1
+}
+
+define i32 @atomicrmw_xchg_i32_acq_rel(i32* %a, i32 %b) {
+; RV32I-LABEL: atomicrmw_xchg_i32_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    call __atomic_exchange_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xchg i32* %a, i32 %b acq_rel
+  ret i32 %1
+}
+
+define i32 @atomicrmw_xchg_i32_seq_cst(i32* %a, i32 %b) {
+; RV32I-LABEL: atomicrmw_xchg_i32_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    call __atomic_exchange_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xchg i32* %a, i32 %b seq_cst
+  ret i32 %1
+}
+
+define i32 @atomicrmw_add_i32_monotonic(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_add_i32_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    call __atomic_fetch_add_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw add i32* %a, i32 %b monotonic
+  ret i32 %1
+}
+
+define i32 @atomicrmw_add_i32_acquire(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_add_i32_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    call __atomic_fetch_add_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw add i32* %a, i32 %b acquire
+  ret i32 %1
+}
+
+define i32 @atomicrmw_add_i32_release(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_add_i32_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    call __atomic_fetch_add_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw add i32* %a, i32 %b release
+  ret i32 %1
+}
+
+define i32 @atomicrmw_add_i32_acq_rel(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_add_i32_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    call __atomic_fetch_add_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw add i32* %a, i32 %b acq_rel
+  ret i32 %1
+}
+
+define i32 @atomicrmw_add_i32_seq_cst(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_add_i32_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    call __atomic_fetch_add_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw add i32* %a, i32 %b seq_cst
+  ret i32 %1
+}
+
+define i32 @atomicrmw_sub_i32_monotonic(i32* %a, i32 %b) {
+; RV32I-LABEL: atomicrmw_sub_i32_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    call __atomic_fetch_sub_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw sub i32* %a, i32 %b monotonic
+  ret i32 %1
+}
+
+define i32 @atomicrmw_sub_i32_acquire(i32* %a, i32 %b) {
+; RV32I-LABEL: atomicrmw_sub_i32_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    call __atomic_fetch_sub_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw sub i32* %a, i32 %b acquire
+  ret i32 %1
+}
+
+define i32 @atomicrmw_sub_i32_release(i32* %a, i32 %b) {
+; RV32I-LABEL: atomicrmw_sub_i32_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    call __atomic_fetch_sub_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw sub i32* %a, i32 %b release
+  ret i32 %1
+}
+
+define i32 @atomicrmw_sub_i32_acq_rel(i32* %a, i32 %b) {
+; RV32I-LABEL: atomicrmw_sub_i32_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    call __atomic_fetch_sub_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw sub i32* %a, i32 %b acq_rel
+  ret i32 %1
+}
+
+define i32 @atomicrmw_sub_i32_seq_cst(i32* %a, i32 %b) {
+; RV32I-LABEL: atomicrmw_sub_i32_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    call __atomic_fetch_sub_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw sub i32* %a, i32 %b seq_cst
+  ret i32 %1
+}
+
+define i32 @atomicrmw_and_i32_monotonic(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_and_i32_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    call __atomic_fetch_and_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw and i32* %a, i32 %b monotonic
+  ret i32 %1
+}
+
+define i32 @atomicrmw_and_i32_acquire(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_and_i32_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    call __atomic_fetch_and_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw and i32* %a, i32 %b acquire
+  ret i32 %1
+}
+
+define i32 @atomicrmw_and_i32_release(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_and_i32_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    call __atomic_fetch_and_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw and i32* %a, i32 %b release
+  ret i32 %1
+}
+
+define i32 @atomicrmw_and_i32_acq_rel(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_and_i32_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    call __atomic_fetch_and_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw and i32* %a, i32 %b acq_rel
+  ret i32 %1
+}
+
+define i32 @atomicrmw_and_i32_seq_cst(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_and_i32_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    call __atomic_fetch_and_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw and i32* %a, i32 %b seq_cst
+  ret i32 %1
+}
+
+define i32 @atomicrmw_nand_i32_monotonic(i32* %a, i32 %b) {
+; RV32I-LABEL: atomicrmw_nand_i32_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    call __atomic_fetch_nand_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw nand i32* %a, i32 %b monotonic
+  ret i32 %1
+}
+
+define i32 @atomicrmw_nand_i32_acquire(i32* %a, i32 %b) {
+; RV32I-LABEL: atomicrmw_nand_i32_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    call __atomic_fetch_nand_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw nand i32* %a, i32 %b acquire
+  ret i32 %1
+}
+
+define i32 @atomicrmw_nand_i32_release(i32* %a, i32 %b) {
+; RV32I-LABEL: atomicrmw_nand_i32_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    call __atomic_fetch_nand_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw nand i32* %a, i32 %b release
+  ret i32 %1
+}
+
+define i32 @atomicrmw_nand_i32_acq_rel(i32* %a, i32 %b) {
+; RV32I-LABEL: atomicrmw_nand_i32_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    call __atomic_fetch_nand_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw nand i32* %a, i32 %b acq_rel
+  ret i32 %1
+}
+
+define i32 @atomicrmw_nand_i32_seq_cst(i32* %a, i32 %b) {
+; RV32I-LABEL: atomicrmw_nand_i32_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    call __atomic_fetch_nand_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw nand i32* %a, i32 %b seq_cst
+  ret i32 %1
+}
+
+define i32 @atomicrmw_or_i32_monotonic(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_or_i32_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    call __atomic_fetch_or_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw or i32* %a, i32 %b monotonic
+  ret i32 %1
+}
+
+define i32 @atomicrmw_or_i32_acquire(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_or_i32_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    call __atomic_fetch_or_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw or i32* %a, i32 %b acquire
+  ret i32 %1
+}
+
+define i32 @atomicrmw_or_i32_release(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_or_i32_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    call __atomic_fetch_or_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw or i32* %a, i32 %b release
+  ret i32 %1
+}
+
+define i32 @atomicrmw_or_i32_acq_rel(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_or_i32_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    call __atomic_fetch_or_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw or i32* %a, i32 %b acq_rel
+  ret i32 %1
+}
+
+define i32 @atomicrmw_or_i32_seq_cst(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_or_i32_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    call __atomic_fetch_or_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw or i32* %a, i32 %b seq_cst
+  ret i32 %1
+}
+
+define i32 @atomicrmw_xor_i32_monotonic(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xor_i32_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    call __atomic_fetch_xor_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xor i32* %a, i32 %b monotonic
+  ret i32 %1
+}
+
+define i32 @atomicrmw_xor_i32_acquire(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xor_i32_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    call __atomic_fetch_xor_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xor i32* %a, i32 %b acquire
+  ret i32 %1
+}
+
+define i32 @atomicrmw_xor_i32_release(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xor_i32_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    call __atomic_fetch_xor_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xor i32* %a, i32 %b release
+  ret i32 %1
+}
+
+define i32 @atomicrmw_xor_i32_acq_rel(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xor_i32_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    call __atomic_fetch_xor_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xor i32* %a, i32 %b acq_rel
+  ret i32 %1
+}
+
+define i32 @atomicrmw_xor_i32_seq_cst(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xor_i32_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    call __atomic_fetch_xor_4
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xor i32* %a, i32 %b seq_cst
+  ret i32 %1
+}
+
+define i32 @atomicrmw_max_i32_monotonic(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_max_i32_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    mv s1, a1
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    addi s3, sp, 12
+; RV32I-NEXT:  .LBB145_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    sw a2, 12(sp)
+; RV32I-NEXT:    blt s1, a2, .LBB145_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB145_1 Depth=1
+; RV32I-NEXT:    mv a2, s1
+; RV32I-NEXT:  .LBB145_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB145_1 Depth=1
+; RV32I-NEXT:    mv a0, s2
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    lw a2, 12(sp)
+; RV32I-NEXT:    beqz a0, .LBB145_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw max i32* %a, i32 %b monotonic
+  ret i32 %1
+}
+
+define i32 @atomicrmw_max_i32_acquire(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_max_i32_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    mv s1, a1
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    addi s3, sp, 8
+; RV32I-NEXT:    addi s4, zero, 2
+; RV32I-NEXT:  .LBB146_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    sw a2, 8(sp)
+; RV32I-NEXT:    blt s1, a2, .LBB146_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB146_1 Depth=1
+; RV32I-NEXT:    mv a2, s1
+; RV32I-NEXT:  .LBB146_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB146_1 Depth=1
+; RV32I-NEXT:    mv a0, s2
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, s4
+; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    lw a2, 8(sp)
+; RV32I-NEXT:    beqz a0, .LBB146_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw max i32* %a, i32 %b acquire
+  ret i32 %1
+}
+
+define i32 @atomicrmw_max_i32_release(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_max_i32_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    mv s1, a1
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    addi s3, sp, 8
+; RV32I-NEXT:    addi s4, zero, 3
+; RV32I-NEXT:  .LBB147_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    sw a2, 8(sp)
+; RV32I-NEXT:    blt s1, a2, .LBB147_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB147_1 Depth=1
+; RV32I-NEXT:    mv a2, s1
+; RV32I-NEXT:  .LBB147_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB147_1 Depth=1
+; RV32I-NEXT:    mv a0, s2
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    lw a2, 8(sp)
+; RV32I-NEXT:    beqz a0, .LBB147_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw max i32* %a, i32 %b release
+  ret i32 %1
+}
+
+define i32 @atomicrmw_max_i32_acq_rel(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_max_i32_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s1, a1
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    addi s3, sp, 4
+; RV32I-NEXT:    addi s4, zero, 4
+; RV32I-NEXT:    addi s5, zero, 2
+; RV32I-NEXT:  .LBB148_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    sw a2, 4(sp)
+; RV32I-NEXT:    blt s1, a2, .LBB148_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB148_1 Depth=1
+; RV32I-NEXT:    mv a2, s1
+; RV32I-NEXT:  .LBB148_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB148_1 Depth=1
+; RV32I-NEXT:    mv a0, s2
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, s5
+; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    lw a2, 4(sp)
+; RV32I-NEXT:    beqz a0, .LBB148_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw max i32* %a, i32 %b acq_rel
+  ret i32 %1
+}
+
+define i32 @atomicrmw_max_i32_seq_cst(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_max_i32_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    mv s1, a1
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    addi s3, sp, 8
+; RV32I-NEXT:    addi s4, zero, 5
+; RV32I-NEXT:  .LBB149_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    sw a2, 8(sp)
+; RV32I-NEXT:    blt s1, a2, .LBB149_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB149_1 Depth=1
+; RV32I-NEXT:    mv a2, s1
+; RV32I-NEXT:  .LBB149_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB149_1 Depth=1
+; RV32I-NEXT:    mv a0, s2
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, s4
+; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    lw a2, 8(sp)
+; RV32I-NEXT:    beqz a0, .LBB149_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw max i32* %a, i32 %b seq_cst
+  ret i32 %1
+}
+
+define i32 @atomicrmw_min_i32_monotonic(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_min_i32_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    mv s1, a1
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    addi s3, sp, 12
+; RV32I-NEXT:  .LBB150_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    sw a2, 12(sp)
+; RV32I-NEXT:    bge s1, a2, .LBB150_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB150_1 Depth=1
+; RV32I-NEXT:    mv a2, s1
+; RV32I-NEXT:  .LBB150_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB150_1 Depth=1
+; RV32I-NEXT:    mv a0, s2
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    lw a2, 12(sp)
+; RV32I-NEXT:    beqz a0, .LBB150_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw min i32* %a, i32 %b monotonic
+  ret i32 %1
+}
+
+define i32 @atomicrmw_min_i32_acquire(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_min_i32_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    mv s1, a1
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    addi s3, sp, 8
+; RV32I-NEXT:    addi s4, zero, 2
+; RV32I-NEXT:  .LBB151_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    sw a2, 8(sp)
+; RV32I-NEXT:    bge s1, a2, .LBB151_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB151_1 Depth=1
+; RV32I-NEXT:    mv a2, s1
+; RV32I-NEXT:  .LBB151_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB151_1 Depth=1
+; RV32I-NEXT:    mv a0, s2
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, s4
+; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    lw a2, 8(sp)
+; RV32I-NEXT:    beqz a0, .LBB151_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw min i32* %a, i32 %b acquire
+  ret i32 %1
+}
+
+define i32 @atomicrmw_min_i32_release(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_min_i32_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    mv s1, a1
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    addi s3, sp, 8
+; RV32I-NEXT:    addi s4, zero, 3
+; RV32I-NEXT:  .LBB152_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    sw a2, 8(sp)
+; RV32I-NEXT:    bge s1, a2, .LBB152_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB152_1 Depth=1
+; RV32I-NEXT:    mv a2, s1
+; RV32I-NEXT:  .LBB152_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB152_1 Depth=1
+; RV32I-NEXT:    mv a0, s2
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    lw a2, 8(sp)
+; RV32I-NEXT:    beqz a0, .LBB152_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw min i32* %a, i32 %b release
+  ret i32 %1
+}
+
+define i32 @atomicrmw_min_i32_acq_rel(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_min_i32_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s1, a1
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    addi s3, sp, 4
+; RV32I-NEXT:    addi s4, zero, 4
+; RV32I-NEXT:    addi s5, zero, 2
+; RV32I-NEXT:  .LBB153_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    sw a2, 4(sp)
+; RV32I-NEXT:    bge s1, a2, .LBB153_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB153_1 Depth=1
+; RV32I-NEXT:    mv a2, s1
+; RV32I-NEXT:  .LBB153_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB153_1 Depth=1
+; RV32I-NEXT:    mv a0, s2
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, s5
+; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    lw a2, 4(sp)
+; RV32I-NEXT:    beqz a0, .LBB153_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw min i32* %a, i32 %b acq_rel
+  ret i32 %1
+}
+
+define i32 @atomicrmw_min_i32_seq_cst(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_min_i32_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    mv s1, a1
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    addi s3, sp, 8
+; RV32I-NEXT:    addi s4, zero, 5
+; RV32I-NEXT:  .LBB154_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    sw a2, 8(sp)
+; RV32I-NEXT:    bge s1, a2, .LBB154_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB154_1 Depth=1
+; RV32I-NEXT:    mv a2, s1
+; RV32I-NEXT:  .LBB154_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB154_1 Depth=1
+; RV32I-NEXT:    mv a0, s2
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, s4
+; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    lw a2, 8(sp)
+; RV32I-NEXT:    beqz a0, .LBB154_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw min i32* %a, i32 %b seq_cst
+  ret i32 %1
+}
+
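+; The unsigned umax/umin expansions below have the same compare-exchange loop
+; shape as the signed max/min tests, differing only in the branch used to pick
+; the new value: unsigned bltu/bgeu in place of the signed comparisons.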
+define i32 @atomicrmw_umax_i32_monotonic(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umax_i32_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    mv s1, a1
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    addi s3, sp, 12
+; RV32I-NEXT:  .LBB155_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    sw a2, 12(sp)
+; RV32I-NEXT:    bltu s1, a2, .LBB155_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB155_1 Depth=1
+; RV32I-NEXT:    mv a2, s1
+; RV32I-NEXT:  .LBB155_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB155_1 Depth=1
+; RV32I-NEXT:    mv a0, s2
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    lw a2, 12(sp)
+; RV32I-NEXT:    beqz a0, .LBB155_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umax i32* %a, i32 %b monotonic
+  ret i32 %1
+}
+
+define i32 @atomicrmw_umax_i32_acquire(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umax_i32_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    mv s1, a1
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    addi s3, sp, 8
+; RV32I-NEXT:    addi s4, zero, 2
+; RV32I-NEXT:  .LBB156_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    sw a2, 8(sp)
+; RV32I-NEXT:    bltu s1, a2, .LBB156_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB156_1 Depth=1
+; RV32I-NEXT:    mv a2, s1
+; RV32I-NEXT:  .LBB156_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB156_1 Depth=1
+; RV32I-NEXT:    mv a0, s2
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, s4
+; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    lw a2, 8(sp)
+; RV32I-NEXT:    beqz a0, .LBB156_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umax i32* %a, i32 %b acquire
+  ret i32 %1
+}
+
+define i32 @atomicrmw_umax_i32_release(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umax_i32_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    mv s1, a1
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    addi s3, sp, 8
+; RV32I-NEXT:    addi s4, zero, 3
+; RV32I-NEXT:  .LBB157_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    sw a2, 8(sp)
+; RV32I-NEXT:    bltu s1, a2, .LBB157_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB157_1 Depth=1
+; RV32I-NEXT:    mv a2, s1
+; RV32I-NEXT:  .LBB157_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB157_1 Depth=1
+; RV32I-NEXT:    mv a0, s2
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    lw a2, 8(sp)
+; RV32I-NEXT:    beqz a0, .LBB157_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umax i32* %a, i32 %b release
+  ret i32 %1
+}
+
+define i32 @atomicrmw_umax_i32_acq_rel(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umax_i32_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s1, a1
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    addi s3, sp, 4
+; RV32I-NEXT:    addi s4, zero, 4
+; RV32I-NEXT:    addi s5, zero, 2
+; RV32I-NEXT:  .LBB158_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    sw a2, 4(sp)
+; RV32I-NEXT:    bltu s1, a2, .LBB158_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB158_1 Depth=1
+; RV32I-NEXT:    mv a2, s1
+; RV32I-NEXT:  .LBB158_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB158_1 Depth=1
+; RV32I-NEXT:    mv a0, s2
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, s5
+; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    lw a2, 4(sp)
+; RV32I-NEXT:    beqz a0, .LBB158_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umax i32* %a, i32 %b acq_rel
+  ret i32 %1
+}
+
+define i32 @atomicrmw_umax_i32_seq_cst(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umax_i32_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    mv s1, a1
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    addi s3, sp, 8
+; RV32I-NEXT:    addi s4, zero, 5
+; RV32I-NEXT:  .LBB159_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    sw a2, 8(sp)
+; RV32I-NEXT:    bltu s1, a2, .LBB159_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB159_1 Depth=1
+; RV32I-NEXT:    mv a2, s1
+; RV32I-NEXT:  .LBB159_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB159_1 Depth=1
+; RV32I-NEXT:    mv a0, s2
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, s4
+; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    lw a2, 8(sp)
+; RV32I-NEXT:    beqz a0, .LBB159_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umax i32* %a, i32 %b seq_cst
+  ret i32 %1
+}
+
+define i32 @atomicrmw_umin_i32_monotonic(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umin_i32_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    mv s1, a1
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    addi s3, sp, 12
+; RV32I-NEXT:  .LBB160_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    sw a2, 12(sp)
+; RV32I-NEXT:    bgeu s1, a2, .LBB160_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB160_1 Depth=1
+; RV32I-NEXT:    mv a2, s1
+; RV32I-NEXT:  .LBB160_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB160_1 Depth=1
+; RV32I-NEXT:    mv a0, s2
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    lw a2, 12(sp)
+; RV32I-NEXT:    beqz a0, .LBB160_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umin i32* %a, i32 %b monotonic
+  ret i32 %1
+}
+
+define i32 @atomicrmw_umin_i32_acquire(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umin_i32_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    mv s1, a1
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    addi s3, sp, 8
+; RV32I-NEXT:    addi s4, zero, 2
+; RV32I-NEXT:  .LBB161_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    sw a2, 8(sp)
+; RV32I-NEXT:    bgeu s1, a2, .LBB161_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB161_1 Depth=1
+; RV32I-NEXT:    mv a2, s1
+; RV32I-NEXT:  .LBB161_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB161_1 Depth=1
+; RV32I-NEXT:    mv a0, s2
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, s4
+; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    lw a2, 8(sp)
+; RV32I-NEXT:    beqz a0, .LBB161_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umin i32* %a, i32 %b acquire
+  ret i32 %1
+}
+
+define i32 @atomicrmw_umin_i32_release(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umin_i32_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    mv s1, a1
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    addi s3, sp, 8
+; RV32I-NEXT:    addi s4, zero, 3
+; RV32I-NEXT:  .LBB162_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    sw a2, 8(sp)
+; RV32I-NEXT:    bgeu s1, a2, .LBB162_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB162_1 Depth=1
+; RV32I-NEXT:    mv a2, s1
+; RV32I-NEXT:  .LBB162_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB162_1 Depth=1
+; RV32I-NEXT:    mv a0, s2
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    lw a2, 8(sp)
+; RV32I-NEXT:    beqz a0, .LBB162_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umin i32* %a, i32 %b release
+  ret i32 %1
+}
+
+define i32 @atomicrmw_umin_i32_acq_rel(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umin_i32_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s1, a1
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    addi s3, sp, 4
+; RV32I-NEXT:    addi s4, zero, 4
+; RV32I-NEXT:    addi s5, zero, 2
+; RV32I-NEXT:  .LBB163_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    sw a2, 4(sp)
+; RV32I-NEXT:    bgeu s1, a2, .LBB163_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB163_1 Depth=1
+; RV32I-NEXT:    mv a2, s1
+; RV32I-NEXT:  .LBB163_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB163_1 Depth=1
+; RV32I-NEXT:    mv a0, s2
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, s5
+; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    lw a2, 4(sp)
+; RV32I-NEXT:    beqz a0, .LBB163_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umin i32* %a, i32 %b acq_rel
+  ret i32 %1
+}
+
+define i32 @atomicrmw_umin_i32_seq_cst(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umin_i32_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    mv s1, a1
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    addi s3, sp, 8
+; RV32I-NEXT:    addi s4, zero, 5
+; RV32I-NEXT:  .LBB164_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    sw a2, 8(sp)
+; RV32I-NEXT:    bgeu s1, a2, .LBB164_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB164_1 Depth=1
+; RV32I-NEXT:    mv a2, s1
+; RV32I-NEXT:  .LBB164_3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB164_1 Depth=1
+; RV32I-NEXT:    mv a0, s2
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a4, s4
+; RV32I-NEXT:    call __atomic_compare_exchange_4
+; RV32I-NEXT:    lw a2, 8(sp)
+; RV32I-NEXT:    beqz a0, .LBB164_1
+; RV32I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umin i32* %a, i32 %b seq_cst
+  ret i32 %1
+}
+
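+; The i64 tests target the __atomic_*_8 libcalls. The memory ordering is
+; passed as a C ABI constant matching the C11 memory_order enumeration
+; (0 = relaxed/monotonic, 2 = acquire, 3 = release, 4 = acq_rel,
+; 5 = seq_cst); with the i64 value already occupying a1/a2, the ordering
+; constant is materialized into a3.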
+define i64 @atomicrmw_xchg_i64_monotonic(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xchg_i64_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    call __atomic_exchange_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xchg i64* %a, i64 %b monotonic
+  ret i64 %1
+}
+
+define i64 @atomicrmw_xchg_i64_acquire(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xchg_i64_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a3, zero, 2
+; RV32I-NEXT:    call __atomic_exchange_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xchg i64* %a, i64 %b acquire
+  ret i64 %1
+}
+
+define i64 @atomicrmw_xchg_i64_release(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xchg_i64_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    call __atomic_exchange_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xchg i64* %a, i64 %b release
+  ret i64 %1
+}
+
+define i64 @atomicrmw_xchg_i64_acq_rel(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xchg_i64_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a3, zero, 4
+; RV32I-NEXT:    call __atomic_exchange_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xchg i64* %a, i64 %b acq_rel
+  ret i64 %1
+}
+
+define i64 @atomicrmw_xchg_i64_seq_cst(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xchg_i64_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a3, zero, 5
+; RV32I-NEXT:    call __atomic_exchange_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xchg i64* %a, i64 %b seq_cst
+  ret i64 %1
+}
+
+define i64 @atomicrmw_add_i64_monotonic(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_add_i64_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    call __atomic_fetch_add_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw add i64* %a, i64 %b monotonic
+  ret i64 %1
+}
+
+define i64 @atomicrmw_add_i64_acquire(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_add_i64_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a3, zero, 2
+; RV32I-NEXT:    call __atomic_fetch_add_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw add i64* %a, i64 %b acquire
+  ret i64 %1
+}
+
+define i64 @atomicrmw_add_i64_release(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_add_i64_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    call __atomic_fetch_add_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw add i64* %a, i64 %b release
+  ret i64 %1
+}
+
+define i64 @atomicrmw_add_i64_acq_rel(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_add_i64_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a3, zero, 4
+; RV32I-NEXT:    call __atomic_fetch_add_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw add i64* %a, i64 %b acq_rel
+  ret i64 %1
+}
+
+define i64 @atomicrmw_add_i64_seq_cst(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_add_i64_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a3, zero, 5
+; RV32I-NEXT:    call __atomic_fetch_add_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw add i64* %a, i64 %b seq_cst
+  ret i64 %1
+}
+
+define i64 @atomicrmw_sub_i64_monotonic(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_sub_i64_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    call __atomic_fetch_sub_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw sub i64* %a, i64 %b monotonic
+  ret i64 %1
+}
+
+define i64 @atomicrmw_sub_i64_acquire(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_sub_i64_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a3, zero, 2
+; RV32I-NEXT:    call __atomic_fetch_sub_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw sub i64* %a, i64 %b acquire
+  ret i64 %1
+}
+
+define i64 @atomicrmw_sub_i64_release(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_sub_i64_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    call __atomic_fetch_sub_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw sub i64* %a, i64 %b release
+  ret i64 %1
+}
+
+define i64 @atomicrmw_sub_i64_acq_rel(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_sub_i64_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a3, zero, 4
+; RV32I-NEXT:    call __atomic_fetch_sub_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw sub i64* %a, i64 %b acq_rel
+  ret i64 %1
+}
+
+define i64 @atomicrmw_sub_i64_seq_cst(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_sub_i64_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a3, zero, 5
+; RV32I-NEXT:    call __atomic_fetch_sub_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw sub i64* %a, i64 %b seq_cst
+  ret i64 %1
+}
+
+define i64 @atomicrmw_and_i64_monotonic(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_and_i64_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    call __atomic_fetch_and_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw and i64* %a, i64 %b monotonic
+  ret i64 %1
+}
+
+define i64 @atomicrmw_and_i64_acquire(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_and_i64_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a3, zero, 2
+; RV32I-NEXT:    call __atomic_fetch_and_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw and i64* %a, i64 %b acquire
+  ret i64 %1
+}
+
+define i64 @atomicrmw_and_i64_release(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_and_i64_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    call __atomic_fetch_and_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw and i64* %a, i64 %b release
+  ret i64 %1
+}
+
+define i64 @atomicrmw_and_i64_acq_rel(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_and_i64_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a3, zero, 4
+; RV32I-NEXT:    call __atomic_fetch_and_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw and i64* %a, i64 %b acq_rel
+  ret i64 %1
+}
+
+define i64 @atomicrmw_and_i64_seq_cst(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_and_i64_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a3, zero, 5
+; RV32I-NEXT:    call __atomic_fetch_and_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw and i64* %a, i64 %b seq_cst
+  ret i64 %1
+}
+
+define i64 @atomicrmw_nand_i64_monotonic(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_nand_i64_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    call __atomic_fetch_nand_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw nand i64* %a, i64 %b monotonic
+  ret i64 %1
+}
+
+define i64 @atomicrmw_nand_i64_acquire(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_nand_i64_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a3, zero, 2
+; RV32I-NEXT:    call __atomic_fetch_nand_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw nand i64* %a, i64 %b acquire
+  ret i64 %1
+}
+
+define i64 @atomicrmw_nand_i64_release(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_nand_i64_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    call __atomic_fetch_nand_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw nand i64* %a, i64 %b release
+  ret i64 %1
+}
+
+define i64 @atomicrmw_nand_i64_acq_rel(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_nand_i64_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a3, zero, 4
+; RV32I-NEXT:    call __atomic_fetch_nand_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw nand i64* %a, i64 %b acq_rel
+  ret i64 %1
+}
+
+define i64 @atomicrmw_nand_i64_seq_cst(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_nand_i64_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a3, zero, 5
+; RV32I-NEXT:    call __atomic_fetch_nand_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw nand i64* %a, i64 %b seq_cst
+  ret i64 %1
+}
+
+define i64 @atomicrmw_or_i64_monotonic(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_or_i64_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    call __atomic_fetch_or_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw or i64* %a, i64 %b monotonic
+  ret i64 %1
+}
+
+define i64 @atomicrmw_or_i64_acquire(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_or_i64_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a3, zero, 2
+; RV32I-NEXT:    call __atomic_fetch_or_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw or i64* %a, i64 %b acquire
+  ret i64 %1
+}
+
+define i64 @atomicrmw_or_i64_release(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_or_i64_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    call __atomic_fetch_or_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw or i64* %a, i64 %b release
+  ret i64 %1
+}
+
+define i64 @atomicrmw_or_i64_acq_rel(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_or_i64_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a3, zero, 4
+; RV32I-NEXT:    call __atomic_fetch_or_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw or i64* %a, i64 %b acq_rel
+  ret i64 %1
+}
+
+define i64 @atomicrmw_or_i64_seq_cst(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_or_i64_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a3, zero, 5
+; RV32I-NEXT:    call __atomic_fetch_or_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw or i64* %a, i64 %b seq_cst
+  ret i64 %1
+}
+
+define i64 @atomicrmw_xor_i64_monotonic(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xor_i64_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    call __atomic_fetch_xor_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xor i64* %a, i64 %b monotonic
+  ret i64 %1
+}
+
+define i64 @atomicrmw_xor_i64_acquire(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xor_i64_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a3, zero, 2
+; RV32I-NEXT:    call __atomic_fetch_xor_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xor i64* %a, i64 %b acquire
+  ret i64 %1
+}
+
+define i64 @atomicrmw_xor_i64_release(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xor_i64_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    call __atomic_fetch_xor_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xor i64* %a, i64 %b release
+  ret i64 %1
+}
+
+define i64 @atomicrmw_xor_i64_acq_rel(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xor_i64_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a3, zero, 4
+; RV32I-NEXT:    call __atomic_fetch_xor_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xor i64* %a, i64 %b acq_rel
+  ret i64 %1
+}
+
+define i64 @atomicrmw_xor_i64_seq_cst(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xor_i64_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    addi a3, zero, 5
+; RV32I-NEXT:    call __atomic_fetch_xor_8
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = atomicrmw xor i64* %a, i64 %b seq_cst
+  ret i64 %1
+}
+
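+; There are no __atomic_fetch_{max,min,umax,umin} libcalls, so AtomicExpandPass
+; expands the remaining i64 operations into a loop around
+; __atomic_compare_exchange_8, with the 64-bit comparison done inline on RV32:
+; the high words are compared first (beq plus slt/sltu), falling back to an
+; unsigned comparison of the low words when the high words are equal.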
+define i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_max_i64_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    mv s1, a2
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:    lw a1, 4(a0)
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    mv s4, sp
+; RV32I-NEXT:  .LBB200_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    beq a1, s1, .LBB200_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB200_1 Depth=1
+; RV32I-NEXT:    slt a0, s1, a1
+; RV32I-NEXT:    sw a2, 0(sp)
+; RV32I-NEXT:    beqz a0, .LBB200_4
+; RV32I-NEXT:    j .LBB200_5
+; RV32I-NEXT:  .LBB200_3: # in Loop: Header=BB200_1 Depth=1
+; RV32I-NEXT:    sltu a0, s2, a2
+; RV32I-NEXT:    sw a2, 0(sp)
+; RV32I-NEXT:    bnez a0, .LBB200_5
+; RV32I-NEXT:  .LBB200_4: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB200_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB200_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB200_1 Depth=1
+; RV32I-NEXT:    mv a3, a1
+; RV32I-NEXT:    bnez a0, .LBB200_7
+; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB200_1 Depth=1
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:  .LBB200_7: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB200_1 Depth=1
+; RV32I-NEXT:    sw a1, 4(sp)
+; RV32I-NEXT:    mv a0, s3
+; RV32I-NEXT:    mv a1, s4
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    lw a1, 4(sp)
+; RV32I-NEXT:    lw a2, 0(sp)
+; RV32I-NEXT:    beqz a0, .LBB200_1
+; RV32I-NEXT:  # %bb.8: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw max i64* %a, i64 %b monotonic
+  ret i64 %1
+}
+
+define i64 @atomicrmw_max_i64_acquire(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_max_i64_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s1, a2
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:    lw a1, 4(a0)
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    mv s4, sp
+; RV32I-NEXT:    addi s5, zero, 2
+; RV32I-NEXT:  .LBB201_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    beq a1, s1, .LBB201_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB201_1 Depth=1
+; RV32I-NEXT:    slt a0, s1, a1
+; RV32I-NEXT:    sw a2, 0(sp)
+; RV32I-NEXT:    beqz a0, .LBB201_4
+; RV32I-NEXT:    j .LBB201_5
+; RV32I-NEXT:  .LBB201_3: # in Loop: Header=BB201_1 Depth=1
+; RV32I-NEXT:    sltu a0, s2, a2
+; RV32I-NEXT:    sw a2, 0(sp)
+; RV32I-NEXT:    bnez a0, .LBB201_5
+; RV32I-NEXT:  .LBB201_4: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB201_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB201_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB201_1 Depth=1
+; RV32I-NEXT:    mv a3, a1
+; RV32I-NEXT:    bnez a0, .LBB201_7
+; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB201_1 Depth=1
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:  .LBB201_7: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB201_1 Depth=1
+; RV32I-NEXT:    sw a1, 4(sp)
+; RV32I-NEXT:    mv a0, s3
+; RV32I-NEXT:    mv a1, s4
+; RV32I-NEXT:    mv a4, s5
+; RV32I-NEXT:    mv a5, s5
+; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    lw a1, 4(sp)
+; RV32I-NEXT:    lw a2, 0(sp)
+; RV32I-NEXT:    beqz a0, .LBB201_1
+; RV32I-NEXT:  # %bb.8: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw max i64* %a, i64 %b acquire
+  ret i64 %1
+}
+
+define i64 @atomicrmw_max_i64_release(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_max_i64_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s1, a2
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:    lw a1, 4(a0)
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    mv s4, sp
+; RV32I-NEXT:    addi s5, zero, 3
+; RV32I-NEXT:  .LBB202_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    beq a1, s1, .LBB202_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB202_1 Depth=1
+; RV32I-NEXT:    slt a0, s1, a1
+; RV32I-NEXT:    sw a2, 0(sp)
+; RV32I-NEXT:    beqz a0, .LBB202_4
+; RV32I-NEXT:    j .LBB202_5
+; RV32I-NEXT:  .LBB202_3: # in Loop: Header=BB202_1 Depth=1
+; RV32I-NEXT:    sltu a0, s2, a2
+; RV32I-NEXT:    sw a2, 0(sp)
+; RV32I-NEXT:    bnez a0, .LBB202_5
+; RV32I-NEXT:  .LBB202_4: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB202_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB202_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB202_1 Depth=1
+; RV32I-NEXT:    mv a3, a1
+; RV32I-NEXT:    bnez a0, .LBB202_7
+; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB202_1 Depth=1
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:  .LBB202_7: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB202_1 Depth=1
+; RV32I-NEXT:    sw a1, 4(sp)
+; RV32I-NEXT:    mv a0, s3
+; RV32I-NEXT:    mv a1, s4
+; RV32I-NEXT:    mv a4, s5
+; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    lw a1, 4(sp)
+; RV32I-NEXT:    lw a2, 0(sp)
+; RV32I-NEXT:    beqz a0, .LBB202_1
+; RV32I-NEXT:  # %bb.8: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw max i64* %a, i64 %b release
+  ret i64 %1
+}
+
+define i64 @atomicrmw_max_i64_acq_rel(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_max_i64_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -48
+; RV32I-NEXT:    sw ra, 44(sp)
+; RV32I-NEXT:    sw s1, 40(sp)
+; RV32I-NEXT:    sw s2, 36(sp)
+; RV32I-NEXT:    sw s3, 32(sp)
+; RV32I-NEXT:    sw s4, 28(sp)
+; RV32I-NEXT:    sw s5, 24(sp)
+; RV32I-NEXT:    sw s6, 20(sp)
+; RV32I-NEXT:    mv s1, a2
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:    lw a1, 4(a0)
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    addi s4, sp, 8
+; RV32I-NEXT:    addi s5, zero, 4
+; RV32I-NEXT:    addi s6, zero, 2
+; RV32I-NEXT:  .LBB203_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    beq a1, s1, .LBB203_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB203_1 Depth=1
+; RV32I-NEXT:    slt a0, s1, a1
+; RV32I-NEXT:    sw a2, 8(sp)
+; RV32I-NEXT:    beqz a0, .LBB203_4
+; RV32I-NEXT:    j .LBB203_5
+; RV32I-NEXT:  .LBB203_3: # in Loop: Header=BB203_1 Depth=1
+; RV32I-NEXT:    sltu a0, s2, a2
+; RV32I-NEXT:    sw a2, 8(sp)
+; RV32I-NEXT:    bnez a0, .LBB203_5
+; RV32I-NEXT:  .LBB203_4: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB203_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB203_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB203_1 Depth=1
+; RV32I-NEXT:    mv a3, a1
+; RV32I-NEXT:    bnez a0, .LBB203_7
+; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB203_1 Depth=1
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:  .LBB203_7: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB203_1 Depth=1
+; RV32I-NEXT:    sw a1, 12(sp)
+; RV32I-NEXT:    mv a0, s3
+; RV32I-NEXT:    mv a1, s4
+; RV32I-NEXT:    mv a4, s5
+; RV32I-NEXT:    mv a5, s6
+; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    lw a1, 12(sp)
+; RV32I-NEXT:    lw a2, 8(sp)
+; RV32I-NEXT:    beqz a0, .LBB203_1
+; RV32I-NEXT:  # %bb.8: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s6, 20(sp)
+; RV32I-NEXT:    lw s5, 24(sp)
+; RV32I-NEXT:    lw s4, 28(sp)
+; RV32I-NEXT:    lw s3, 32(sp)
+; RV32I-NEXT:    lw s2, 36(sp)
+; RV32I-NEXT:    lw s1, 40(sp)
+; RV32I-NEXT:    lw ra, 44(sp)
+; RV32I-NEXT:    addi sp, sp, 48
+; RV32I-NEXT:    ret
+  %1 = atomicrmw max i64* %a, i64 %b acq_rel
+  ret i64 %1
+}
+
+define i64 @atomicrmw_max_i64_seq_cst(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_max_i64_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s1, a2
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:    lw a1, 4(a0)
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    mv s4, sp
+; RV32I-NEXT:    addi s5, zero, 5
+; RV32I-NEXT:  .LBB204_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    beq a1, s1, .LBB204_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB204_1 Depth=1
+; RV32I-NEXT:    slt a0, s1, a1
+; RV32I-NEXT:    sw a2, 0(sp)
+; RV32I-NEXT:    beqz a0, .LBB204_4
+; RV32I-NEXT:    j .LBB204_5
+; RV32I-NEXT:  .LBB204_3: # in Loop: Header=BB204_1 Depth=1
+; RV32I-NEXT:    sltu a0, s2, a2
+; RV32I-NEXT:    sw a2, 0(sp)
+; RV32I-NEXT:    bnez a0, .LBB204_5
+; RV32I-NEXT:  .LBB204_4: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB204_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB204_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB204_1 Depth=1
+; RV32I-NEXT:    mv a3, a1
+; RV32I-NEXT:    bnez a0, .LBB204_7
+; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB204_1 Depth=1
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:  .LBB204_7: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB204_1 Depth=1
+; RV32I-NEXT:    sw a1, 4(sp)
+; RV32I-NEXT:    mv a0, s3
+; RV32I-NEXT:    mv a1, s4
+; RV32I-NEXT:    mv a4, s5
+; RV32I-NEXT:    mv a5, s5
+; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    lw a1, 4(sp)
+; RV32I-NEXT:    lw a2, 0(sp)
+; RV32I-NEXT:    beqz a0, .LBB204_1
+; RV32I-NEXT:  # %bb.8: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw max i64* %a, i64 %b seq_cst
+  ret i64 %1
+}
+
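+; The i64 min expansion computes the same "loaded value greater than %b"
+; predicate as max and then inverts it (xori a0, a0, 1) before selecting which
+; operand's halves become the new value.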
+define i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_min_i64_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    mv s1, a2
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:    lw a1, 4(a0)
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    mv s4, sp
+; RV32I-NEXT:  .LBB205_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    beq a1, s1, .LBB205_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB205_1 Depth=1
+; RV32I-NEXT:    slt a0, s1, a1
+; RV32I-NEXT:    j .LBB205_4
+; RV32I-NEXT:  .LBB205_3: # in Loop: Header=BB205_1 Depth=1
+; RV32I-NEXT:    sltu a0, s2, a2
+; RV32I-NEXT:  .LBB205_4: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB205_1 Depth=1
+; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    sw a2, 0(sp)
+; RV32I-NEXT:    bnez a0, .LBB205_6
+; RV32I-NEXT:  # %bb.5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB205_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB205_6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB205_1 Depth=1
+; RV32I-NEXT:    mv a3, a1
+; RV32I-NEXT:    bnez a0, .LBB205_8
+; RV32I-NEXT:  # %bb.7: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB205_1 Depth=1
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:  .LBB205_8: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB205_1 Depth=1
+; RV32I-NEXT:    sw a1, 4(sp)
+; RV32I-NEXT:    mv a0, s3
+; RV32I-NEXT:    mv a1, s4
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    lw a1, 4(sp)
+; RV32I-NEXT:    lw a2, 0(sp)
+; RV32I-NEXT:    beqz a0, .LBB205_1
+; RV32I-NEXT:  # %bb.9: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw min i64* %a, i64 %b monotonic
+  ret i64 %1
+}
+
+define i64 @atomicrmw_min_i64_acquire(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_min_i64_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s1, a2
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:    lw a1, 4(a0)
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    mv s4, sp
+; RV32I-NEXT:    addi s5, zero, 2
+; RV32I-NEXT:  .LBB206_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    beq a1, s1, .LBB206_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB206_1 Depth=1
+; RV32I-NEXT:    slt a0, s1, a1
+; RV32I-NEXT:    j .LBB206_4
+; RV32I-NEXT:  .LBB206_3: # in Loop: Header=BB206_1 Depth=1
+; RV32I-NEXT:    sltu a0, s2, a2
+; RV32I-NEXT:  .LBB206_4: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB206_1 Depth=1
+; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    sw a2, 0(sp)
+; RV32I-NEXT:    bnez a0, .LBB206_6
+; RV32I-NEXT:  # %bb.5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB206_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB206_6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB206_1 Depth=1
+; RV32I-NEXT:    mv a3, a1
+; RV32I-NEXT:    bnez a0, .LBB206_8
+; RV32I-NEXT:  # %bb.7: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB206_1 Depth=1
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:  .LBB206_8: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB206_1 Depth=1
+; RV32I-NEXT:    sw a1, 4(sp)
+; RV32I-NEXT:    mv a0, s3
+; RV32I-NEXT:    mv a1, s4
+; RV32I-NEXT:    mv a4, s5
+; RV32I-NEXT:    mv a5, s5
+; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    lw a1, 4(sp)
+; RV32I-NEXT:    lw a2, 0(sp)
+; RV32I-NEXT:    beqz a0, .LBB206_1
+; RV32I-NEXT:  # %bb.9: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw min i64* %a, i64 %b acquire
+  ret i64 %1
+}
+
+define i64 @atomicrmw_min_i64_release(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_min_i64_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s1, a2
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:    lw a1, 4(a0)
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    mv s4, sp
+; RV32I-NEXT:    addi s5, zero, 3
+; RV32I-NEXT:  .LBB207_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    beq a1, s1, .LBB207_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB207_1 Depth=1
+; RV32I-NEXT:    slt a0, s1, a1
+; RV32I-NEXT:    j .LBB207_4
+; RV32I-NEXT:  .LBB207_3: # in Loop: Header=BB207_1 Depth=1
+; RV32I-NEXT:    sltu a0, s2, a2
+; RV32I-NEXT:  .LBB207_4: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB207_1 Depth=1
+; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    sw a2, 0(sp)
+; RV32I-NEXT:    bnez a0, .LBB207_6
+; RV32I-NEXT:  # %bb.5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB207_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB207_6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB207_1 Depth=1
+; RV32I-NEXT:    mv a3, a1
+; RV32I-NEXT:    bnez a0, .LBB207_8
+; RV32I-NEXT:  # %bb.7: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB207_1 Depth=1
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:  .LBB207_8: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB207_1 Depth=1
+; RV32I-NEXT:    sw a1, 4(sp)
+; RV32I-NEXT:    mv a0, s3
+; RV32I-NEXT:    mv a1, s4
+; RV32I-NEXT:    mv a4, s5
+; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    lw a1, 4(sp)
+; RV32I-NEXT:    lw a2, 0(sp)
+; RV32I-NEXT:    beqz a0, .LBB207_1
+; RV32I-NEXT:  # %bb.9: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw min i64* %a, i64 %b release
+  ret i64 %1
+}
+
+define i64 @atomicrmw_min_i64_acq_rel(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_min_i64_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -48
+; RV32I-NEXT:    sw ra, 44(sp)
+; RV32I-NEXT:    sw s1, 40(sp)
+; RV32I-NEXT:    sw s2, 36(sp)
+; RV32I-NEXT:    sw s3, 32(sp)
+; RV32I-NEXT:    sw s4, 28(sp)
+; RV32I-NEXT:    sw s5, 24(sp)
+; RV32I-NEXT:    sw s6, 20(sp)
+; RV32I-NEXT:    mv s1, a2
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:    lw a1, 4(a0)
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    addi s4, sp, 8
+; RV32I-NEXT:    addi s5, zero, 4
+; RV32I-NEXT:    addi s6, zero, 2
+; RV32I-NEXT:  .LBB208_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    beq a1, s1, .LBB208_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB208_1 Depth=1
+; RV32I-NEXT:    slt a0, s1, a1
+; RV32I-NEXT:    j .LBB208_4
+; RV32I-NEXT:  .LBB208_3: # in Loop: Header=BB208_1 Depth=1
+; RV32I-NEXT:    sltu a0, s2, a2
+; RV32I-NEXT:  .LBB208_4: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB208_1 Depth=1
+; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    sw a2, 8(sp)
+; RV32I-NEXT:    bnez a0, .LBB208_6
+; RV32I-NEXT:  # %bb.5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB208_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB208_6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB208_1 Depth=1
+; RV32I-NEXT:    mv a3, a1
+; RV32I-NEXT:    bnez a0, .LBB208_8
+; RV32I-NEXT:  # %bb.7: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB208_1 Depth=1
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:  .LBB208_8: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB208_1 Depth=1
+; RV32I-NEXT:    sw a1, 12(sp)
+; RV32I-NEXT:    mv a0, s3
+; RV32I-NEXT:    mv a1, s4
+; RV32I-NEXT:    mv a4, s5
+; RV32I-NEXT:    mv a5, s6
+; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    lw a1, 12(sp)
+; RV32I-NEXT:    lw a2, 8(sp)
+; RV32I-NEXT:    beqz a0, .LBB208_1
+; RV32I-NEXT:  # %bb.9: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s6, 20(sp)
+; RV32I-NEXT:    lw s5, 24(sp)
+; RV32I-NEXT:    lw s4, 28(sp)
+; RV32I-NEXT:    lw s3, 32(sp)
+; RV32I-NEXT:    lw s2, 36(sp)
+; RV32I-NEXT:    lw s1, 40(sp)
+; RV32I-NEXT:    lw ra, 44(sp)
+; RV32I-NEXT:    addi sp, sp, 48
+; RV32I-NEXT:    ret
+  %1 = atomicrmw min i64* %a, i64 %b acq_rel
+  ret i64 %1
+}
+
+define i64 @atomicrmw_min_i64_seq_cst(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_min_i64_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s1, a2
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:    lw a1, 4(a0)
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    mv s4, sp
+; RV32I-NEXT:    addi s5, zero, 5
+; RV32I-NEXT:  .LBB209_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    beq a1, s1, .LBB209_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB209_1 Depth=1
+; RV32I-NEXT:    slt a0, s1, a1
+; RV32I-NEXT:    j .LBB209_4
+; RV32I-NEXT:  .LBB209_3: # in Loop: Header=BB209_1 Depth=1
+; RV32I-NEXT:    sltu a0, s2, a2
+; RV32I-NEXT:  .LBB209_4: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB209_1 Depth=1
+; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    sw a2, 0(sp)
+; RV32I-NEXT:    bnez a0, .LBB209_6
+; RV32I-NEXT:  # %bb.5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB209_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB209_6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB209_1 Depth=1
+; RV32I-NEXT:    mv a3, a1
+; RV32I-NEXT:    bnez a0, .LBB209_8
+; RV32I-NEXT:  # %bb.7: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB209_1 Depth=1
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:  .LBB209_8: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB209_1 Depth=1
+; RV32I-NEXT:    sw a1, 4(sp)
+; RV32I-NEXT:    mv a0, s3
+; RV32I-NEXT:    mv a1, s4
+; RV32I-NEXT:    mv a4, s5
+; RV32I-NEXT:    mv a5, s5
+; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    lw a1, 4(sp)
+; RV32I-NEXT:    lw a2, 0(sp)
+; RV32I-NEXT:    beqz a0, .LBB209_1
+; RV32I-NEXT:  # %bb.9: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw min i64* %a, i64 %b seq_cst
+  ret i64 %1
+}
+
+define i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umax_i64_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    mv s1, a2
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:    lw a1, 4(a0)
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    mv s4, sp
+; RV32I-NEXT:  .LBB210_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    beq a1, s1, .LBB210_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB210_1 Depth=1
+; RV32I-NEXT:    sltu a0, s1, a1
+; RV32I-NEXT:    sw a2, 0(sp)
+; RV32I-NEXT:    beqz a0, .LBB210_4
+; RV32I-NEXT:    j .LBB210_5
+; RV32I-NEXT:  .LBB210_3: # in Loop: Header=BB210_1 Depth=1
+; RV32I-NEXT:    sltu a0, s2, a2
+; RV32I-NEXT:    sw a2, 0(sp)
+; RV32I-NEXT:    bnez a0, .LBB210_5
+; RV32I-NEXT:  .LBB210_4: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB210_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB210_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB210_1 Depth=1
+; RV32I-NEXT:    mv a3, a1
+; RV32I-NEXT:    bnez a0, .LBB210_7
+; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB210_1 Depth=1
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:  .LBB210_7: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB210_1 Depth=1
+; RV32I-NEXT:    sw a1, 4(sp)
+; RV32I-NEXT:    mv a0, s3
+; RV32I-NEXT:    mv a1, s4
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    lw a1, 4(sp)
+; RV32I-NEXT:    lw a2, 0(sp)
+; RV32I-NEXT:    beqz a0, .LBB210_1
+; RV32I-NEXT:  # %bb.8: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umax i64* %a, i64 %b monotonic
+  ret i64 %1
+}
+
+define i64 @atomicrmw_umax_i64_acquire(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umax_i64_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s1, a2
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:    lw a1, 4(a0)
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    mv s4, sp
+; RV32I-NEXT:    addi s5, zero, 2
+; RV32I-NEXT:  .LBB211_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    beq a1, s1, .LBB211_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB211_1 Depth=1
+; RV32I-NEXT:    sltu a0, s1, a1
+; RV32I-NEXT:    sw a2, 0(sp)
+; RV32I-NEXT:    beqz a0, .LBB211_4
+; RV32I-NEXT:    j .LBB211_5
+; RV32I-NEXT:  .LBB211_3: # in Loop: Header=BB211_1 Depth=1
+; RV32I-NEXT:    sltu a0, s2, a2
+; RV32I-NEXT:    sw a2, 0(sp)
+; RV32I-NEXT:    bnez a0, .LBB211_5
+; RV32I-NEXT:  .LBB211_4: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB211_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB211_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB211_1 Depth=1
+; RV32I-NEXT:    mv a3, a1
+; RV32I-NEXT:    bnez a0, .LBB211_7
+; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB211_1 Depth=1
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:  .LBB211_7: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB211_1 Depth=1
+; RV32I-NEXT:    sw a1, 4(sp)
+; RV32I-NEXT:    mv a0, s3
+; RV32I-NEXT:    mv a1, s4
+; RV32I-NEXT:    mv a4, s5
+; RV32I-NEXT:    mv a5, s5
+; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    lw a1, 4(sp)
+; RV32I-NEXT:    lw a2, 0(sp)
+; RV32I-NEXT:    beqz a0, .LBB211_1
+; RV32I-NEXT:  # %bb.8: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umax i64* %a, i64 %b acquire
+  ret i64 %1
+}
+
+define i64 @atomicrmw_umax_i64_release(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umax_i64_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s1, a2
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:    lw a1, 4(a0)
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    mv s4, sp
+; RV32I-NEXT:    addi s5, zero, 3
+; RV32I-NEXT:  .LBB212_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    beq a1, s1, .LBB212_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB212_1 Depth=1
+; RV32I-NEXT:    sltu a0, s1, a1
+; RV32I-NEXT:    sw a2, 0(sp)
+; RV32I-NEXT:    beqz a0, .LBB212_4
+; RV32I-NEXT:    j .LBB212_5
+; RV32I-NEXT:  .LBB212_3: # in Loop: Header=BB212_1 Depth=1
+; RV32I-NEXT:    sltu a0, s2, a2
+; RV32I-NEXT:    sw a2, 0(sp)
+; RV32I-NEXT:    bnez a0, .LBB212_5
+; RV32I-NEXT:  .LBB212_4: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB212_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB212_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB212_1 Depth=1
+; RV32I-NEXT:    mv a3, a1
+; RV32I-NEXT:    bnez a0, .LBB212_7
+; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB212_1 Depth=1
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:  .LBB212_7: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB212_1 Depth=1
+; RV32I-NEXT:    sw a1, 4(sp)
+; RV32I-NEXT:    mv a0, s3
+; RV32I-NEXT:    mv a1, s4
+; RV32I-NEXT:    mv a4, s5
+; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    lw a1, 4(sp)
+; RV32I-NEXT:    lw a2, 0(sp)
+; RV32I-NEXT:    beqz a0, .LBB212_1
+; RV32I-NEXT:  # %bb.8: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umax i64* %a, i64 %b release
+  ret i64 %1
+}
+
+define i64 @atomicrmw_umax_i64_acq_rel(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umax_i64_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -48
+; RV32I-NEXT:    sw ra, 44(sp)
+; RV32I-NEXT:    sw s1, 40(sp)
+; RV32I-NEXT:    sw s2, 36(sp)
+; RV32I-NEXT:    sw s3, 32(sp)
+; RV32I-NEXT:    sw s4, 28(sp)
+; RV32I-NEXT:    sw s5, 24(sp)
+; RV32I-NEXT:    sw s6, 20(sp)
+; RV32I-NEXT:    mv s1, a2
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:    lw a1, 4(a0)
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    addi s4, sp, 8
+; RV32I-NEXT:    addi s5, zero, 4
+; RV32I-NEXT:    addi s6, zero, 2
+; RV32I-NEXT:  .LBB213_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    beq a1, s1, .LBB213_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB213_1 Depth=1
+; RV32I-NEXT:    sltu a0, s1, a1
+; RV32I-NEXT:    sw a2, 8(sp)
+; RV32I-NEXT:    beqz a0, .LBB213_4
+; RV32I-NEXT:    j .LBB213_5
+; RV32I-NEXT:  .LBB213_3: # in Loop: Header=BB213_1 Depth=1
+; RV32I-NEXT:    sltu a0, s2, a2
+; RV32I-NEXT:    sw a2, 8(sp)
+; RV32I-NEXT:    bnez a0, .LBB213_5
+; RV32I-NEXT:  .LBB213_4: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB213_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB213_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB213_1 Depth=1
+; RV32I-NEXT:    mv a3, a1
+; RV32I-NEXT:    bnez a0, .LBB213_7
+; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB213_1 Depth=1
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:  .LBB213_7: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB213_1 Depth=1
+; RV32I-NEXT:    sw a1, 12(sp)
+; RV32I-NEXT:    mv a0, s3
+; RV32I-NEXT:    mv a1, s4
+; RV32I-NEXT:    mv a4, s5
+; RV32I-NEXT:    mv a5, s6
+; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    lw a1, 12(sp)
+; RV32I-NEXT:    lw a2, 8(sp)
+; RV32I-NEXT:    beqz a0, .LBB213_1
+; RV32I-NEXT:  # %bb.8: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s6, 20(sp)
+; RV32I-NEXT:    lw s5, 24(sp)
+; RV32I-NEXT:    lw s4, 28(sp)
+; RV32I-NEXT:    lw s3, 32(sp)
+; RV32I-NEXT:    lw s2, 36(sp)
+; RV32I-NEXT:    lw s1, 40(sp)
+; RV32I-NEXT:    lw ra, 44(sp)
+; RV32I-NEXT:    addi sp, sp, 48
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umax i64* %a, i64 %b acq_rel
+  ret i64 %1
+}
+
+define i64 @atomicrmw_umax_i64_seq_cst(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umax_i64_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s1, a2
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:    lw a1, 4(a0)
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    mv s4, sp
+; RV32I-NEXT:    addi s5, zero, 5
+; RV32I-NEXT:  .LBB214_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    beq a1, s1, .LBB214_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB214_1 Depth=1
+; RV32I-NEXT:    sltu a0, s1, a1
+; RV32I-NEXT:    sw a2, 0(sp)
+; RV32I-NEXT:    beqz a0, .LBB214_4
+; RV32I-NEXT:    j .LBB214_5
+; RV32I-NEXT:  .LBB214_3: # in Loop: Header=BB214_1 Depth=1
+; RV32I-NEXT:    sltu a0, s2, a2
+; RV32I-NEXT:    sw a2, 0(sp)
+; RV32I-NEXT:    bnez a0, .LBB214_5
+; RV32I-NEXT:  .LBB214_4: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB214_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB214_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB214_1 Depth=1
+; RV32I-NEXT:    mv a3, a1
+; RV32I-NEXT:    bnez a0, .LBB214_7
+; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB214_1 Depth=1
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:  .LBB214_7: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB214_1 Depth=1
+; RV32I-NEXT:    sw a1, 4(sp)
+; RV32I-NEXT:    mv a0, s3
+; RV32I-NEXT:    mv a1, s4
+; RV32I-NEXT:    mv a4, s5
+; RV32I-NEXT:    mv a5, s5
+; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    lw a1, 4(sp)
+; RV32I-NEXT:    lw a2, 0(sp)
+; RV32I-NEXT:    beqz a0, .LBB214_1
+; RV32I-NEXT:  # %bb.8: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umax i64* %a, i64 %b seq_cst
+  ret i64 %1
+}
+
+define i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umin_i64_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    mv s1, a2
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:    lw a1, 4(a0)
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    mv s4, sp
+; RV32I-NEXT:  .LBB215_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    beq a1, s1, .LBB215_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB215_1 Depth=1
+; RV32I-NEXT:    sltu a0, s1, a1
+; RV32I-NEXT:    j .LBB215_4
+; RV32I-NEXT:  .LBB215_3: # in Loop: Header=BB215_1 Depth=1
+; RV32I-NEXT:    sltu a0, s2, a2
+; RV32I-NEXT:  .LBB215_4: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB215_1 Depth=1
+; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    sw a2, 0(sp)
+; RV32I-NEXT:    bnez a0, .LBB215_6
+; RV32I-NEXT:  # %bb.5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB215_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB215_6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB215_1 Depth=1
+; RV32I-NEXT:    mv a3, a1
+; RV32I-NEXT:    bnez a0, .LBB215_8
+; RV32I-NEXT:  # %bb.7: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB215_1 Depth=1
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:  .LBB215_8: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB215_1 Depth=1
+; RV32I-NEXT:    sw a1, 4(sp)
+; RV32I-NEXT:    mv a0, s3
+; RV32I-NEXT:    mv a1, s4
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    lw a1, 4(sp)
+; RV32I-NEXT:    lw a2, 0(sp)
+; RV32I-NEXT:    beqz a0, .LBB215_1
+; RV32I-NEXT:  # %bb.9: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umin i64* %a, i64 %b monotonic
+  ret i64 %1
+}
+
+define i64 @atomicrmw_umin_i64_acquire(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umin_i64_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s1, a2
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:    lw a1, 4(a0)
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    mv s4, sp
+; RV32I-NEXT:    addi s5, zero, 2
+; RV32I-NEXT:  .LBB216_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    beq a1, s1, .LBB216_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB216_1 Depth=1
+; RV32I-NEXT:    sltu a0, s1, a1
+; RV32I-NEXT:    j .LBB216_4
+; RV32I-NEXT:  .LBB216_3: # in Loop: Header=BB216_1 Depth=1
+; RV32I-NEXT:    sltu a0, s2, a2
+; RV32I-NEXT:  .LBB216_4: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB216_1 Depth=1
+; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    sw a2, 0(sp)
+; RV32I-NEXT:    bnez a0, .LBB216_6
+; RV32I-NEXT:  # %bb.5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB216_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB216_6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB216_1 Depth=1
+; RV32I-NEXT:    mv a3, a1
+; RV32I-NEXT:    bnez a0, .LBB216_8
+; RV32I-NEXT:  # %bb.7: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB216_1 Depth=1
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:  .LBB216_8: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB216_1 Depth=1
+; RV32I-NEXT:    sw a1, 4(sp)
+; RV32I-NEXT:    mv a0, s3
+; RV32I-NEXT:    mv a1, s4
+; RV32I-NEXT:    mv a4, s5
+; RV32I-NEXT:    mv a5, s5
+; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    lw a1, 4(sp)
+; RV32I-NEXT:    lw a2, 0(sp)
+; RV32I-NEXT:    beqz a0, .LBB216_1
+; RV32I-NEXT:  # %bb.9: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umin i64* %a, i64 %b acquire
+  ret i64 %1
+}
+
+define i64 @atomicrmw_umin_i64_release(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umin_i64_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s1, a2
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:    lw a1, 4(a0)
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    mv s4, sp
+; RV32I-NEXT:    addi s5, zero, 3
+; RV32I-NEXT:  .LBB217_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    beq a1, s1, .LBB217_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB217_1 Depth=1
+; RV32I-NEXT:    sltu a0, s1, a1
+; RV32I-NEXT:    j .LBB217_4
+; RV32I-NEXT:  .LBB217_3: # in Loop: Header=BB217_1 Depth=1
+; RV32I-NEXT:    sltu a0, s2, a2
+; RV32I-NEXT:  .LBB217_4: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB217_1 Depth=1
+; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    sw a2, 0(sp)
+; RV32I-NEXT:    bnez a0, .LBB217_6
+; RV32I-NEXT:  # %bb.5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB217_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB217_6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB217_1 Depth=1
+; RV32I-NEXT:    mv a3, a1
+; RV32I-NEXT:    bnez a0, .LBB217_8
+; RV32I-NEXT:  # %bb.7: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB217_1 Depth=1
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:  .LBB217_8: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB217_1 Depth=1
+; RV32I-NEXT:    sw a1, 4(sp)
+; RV32I-NEXT:    mv a0, s3
+; RV32I-NEXT:    mv a1, s4
+; RV32I-NEXT:    mv a4, s5
+; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    lw a1, 4(sp)
+; RV32I-NEXT:    lw a2, 0(sp)
+; RV32I-NEXT:    beqz a0, .LBB217_1
+; RV32I-NEXT:  # %bb.9: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umin i64* %a, i64 %b release
+  ret i64 %1
+}
+
+define i64 @atomicrmw_umin_i64_acq_rel(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umin_i64_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -48
+; RV32I-NEXT:    sw ra, 44(sp)
+; RV32I-NEXT:    sw s1, 40(sp)
+; RV32I-NEXT:    sw s2, 36(sp)
+; RV32I-NEXT:    sw s3, 32(sp)
+; RV32I-NEXT:    sw s4, 28(sp)
+; RV32I-NEXT:    sw s5, 24(sp)
+; RV32I-NEXT:    sw s6, 20(sp)
+; RV32I-NEXT:    mv s1, a2
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:    lw a1, 4(a0)
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    addi s4, sp, 8
+; RV32I-NEXT:    addi s5, zero, 4
+; RV32I-NEXT:    addi s6, zero, 2
+; RV32I-NEXT:  .LBB218_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    beq a1, s1, .LBB218_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB218_1 Depth=1
+; RV32I-NEXT:    sltu a0, s1, a1
+; RV32I-NEXT:    j .LBB218_4
+; RV32I-NEXT:  .LBB218_3: # in Loop: Header=BB218_1 Depth=1
+; RV32I-NEXT:    sltu a0, s2, a2
+; RV32I-NEXT:  .LBB218_4: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB218_1 Depth=1
+; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    sw a2, 8(sp)
+; RV32I-NEXT:    bnez a0, .LBB218_6
+; RV32I-NEXT:  # %bb.5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB218_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB218_6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB218_1 Depth=1
+; RV32I-NEXT:    mv a3, a1
+; RV32I-NEXT:    bnez a0, .LBB218_8
+; RV32I-NEXT:  # %bb.7: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB218_1 Depth=1
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:  .LBB218_8: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB218_1 Depth=1
+; RV32I-NEXT:    sw a1, 12(sp)
+; RV32I-NEXT:    mv a0, s3
+; RV32I-NEXT:    mv a1, s4
+; RV32I-NEXT:    mv a4, s5
+; RV32I-NEXT:    mv a5, s6
+; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    lw a1, 12(sp)
+; RV32I-NEXT:    lw a2, 8(sp)
+; RV32I-NEXT:    beqz a0, .LBB218_1
+; RV32I-NEXT:  # %bb.9: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s6, 20(sp)
+; RV32I-NEXT:    lw s5, 24(sp)
+; RV32I-NEXT:    lw s4, 28(sp)
+; RV32I-NEXT:    lw s3, 32(sp)
+; RV32I-NEXT:    lw s2, 36(sp)
+; RV32I-NEXT:    lw s1, 40(sp)
+; RV32I-NEXT:    lw ra, 44(sp)
+; RV32I-NEXT:    addi sp, sp, 48
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umin i64* %a, i64 %b acq_rel
+  ret i64 %1
+}
+
+define i64 @atomicrmw_umin_i64_seq_cst(i64 *%a, i64 %b) nounwind {
+; RV32I-LABEL: atomicrmw_umin_i64_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp)
+; RV32I-NEXT:    sw s1, 24(sp)
+; RV32I-NEXT:    sw s2, 20(sp)
+; RV32I-NEXT:    sw s3, 16(sp)
+; RV32I-NEXT:    sw s4, 12(sp)
+; RV32I-NEXT:    sw s5, 8(sp)
+; RV32I-NEXT:    mv s1, a2
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:    lw a1, 4(a0)
+; RV32I-NEXT:    lw a2, 0(a0)
+; RV32I-NEXT:    mv s4, sp
+; RV32I-NEXT:    addi s5, zero, 5
+; RV32I-NEXT:  .LBB219_1: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    beq a1, s1, .LBB219_3
+; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB219_1 Depth=1
+; RV32I-NEXT:    sltu a0, s1, a1
+; RV32I-NEXT:    j .LBB219_4
+; RV32I-NEXT:  .LBB219_3: # in Loop: Header=BB219_1 Depth=1
+; RV32I-NEXT:    sltu a0, s2, a2
+; RV32I-NEXT:  .LBB219_4: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB219_1 Depth=1
+; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    sw a2, 0(sp)
+; RV32I-NEXT:    bnez a0, .LBB219_6
+; RV32I-NEXT:  # %bb.5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB219_1 Depth=1
+; RV32I-NEXT:    mv a2, s2
+; RV32I-NEXT:  .LBB219_6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB219_1 Depth=1
+; RV32I-NEXT:    mv a3, a1
+; RV32I-NEXT:    bnez a0, .LBB219_8
+; RV32I-NEXT:  # %bb.7: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB219_1 Depth=1
+; RV32I-NEXT:    mv a3, s1
+; RV32I-NEXT:  .LBB219_8: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB219_1 Depth=1
+; RV32I-NEXT:    sw a1, 4(sp)
+; RV32I-NEXT:    mv a0, s3
+; RV32I-NEXT:    mv a1, s4
+; RV32I-NEXT:    mv a4, s5
+; RV32I-NEXT:    mv a5, s5
+; RV32I-NEXT:    call __atomic_compare_exchange_8
+; RV32I-NEXT:    lw a1, 4(sp)
+; RV32I-NEXT:    lw a2, 0(sp)
+; RV32I-NEXT:    beqz a0, .LBB219_1
+; RV32I-NEXT:  # %bb.9: # %atomicrmw.end
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    lw s5, 8(sp)
+; RV32I-NEXT:    lw s4, 12(sp)
+; RV32I-NEXT:    lw s3, 16(sp)
+; RV32I-NEXT:    lw s2, 20(sp)
+; RV32I-NEXT:    lw s1, 24(sp)
+; RV32I-NEXT:    lw ra, 28(sp)
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+  %1 = atomicrmw umin i64* %a, i64 %b seq_cst
+  ret i64 %1
+}