[llvm] r350962 - [RISCV][NFC] Add CHECK lines for atomic operations on RV64I

Alex Bradbury via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 11 11:46:49 PST 2019


Author: asb
Date: Fri Jan 11 11:46:48 2019
New Revision: 350962

URL: http://llvm.org/viewvc/llvm-project?rev=350962&view=rev
Log:
[RISCV][NFC] Add CHECK lines for atomic operations on RV64I

As for RV32I, we include these for completeness. Committing now to make it
easier to review the RV64A patch.
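
For reference: because these RUN lines use -mtriple=riscv64 without
-mattr=+a, every atomic operation is expanded to an __atomic_* libcall,
with the orderings passed as the usual C ABI memory-order constants
(monotonic=0, acquire=2, release=3, acq_rel=4, seq_cst=5). A minimal
illustration using the first test below; the lowering described in the
comments is exactly what the new CHECK lines assert:

    ; Input IR: byte-sized compare-and-swap with monotonic/monotonic orderings.
    %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val monotonic monotonic
    ; Expected RV64I lowering: spill %cmp to a stack slot, then call
    ;   __atomic_compare_exchange_1(ptr, &expected, val, success, failure)
    ; with a3 (success order) and a4 (failure order) both zero, i.e. monotonic.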


Modified:
    llvm/trunk/test/CodeGen/RISCV/atomic-cmpxchg.ll
    llvm/trunk/test/CodeGen/RISCV/atomic-fence.ll
    llvm/trunk/test/CodeGen/RISCV/atomic-load-store.ll
    llvm/trunk/test/CodeGen/RISCV/atomic-rmw.ll

Modified: llvm/trunk/test/CodeGen/RISCV/atomic-cmpxchg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/atomic-cmpxchg.ll?rev=350962&r1=350961&r2=350962&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/atomic-cmpxchg.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/atomic-cmpxchg.ll Fri Jan 11 11:46:48 2019
@@ -3,6 +3,8 @@
 ; RUN:   | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32IA %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
 
 define void @cmpxchg_i8_monotonic_monotonic(i8* %ptr, i8 %cmp, i8 %val) {
 ; RV32I-LABEL: cmpxchg_i8_monotonic_monotonic:
@@ -41,6 +43,19 @@ define void @cmpxchg_i8_monotonic_monoto
 ; RV32IA-NEXT:    bnez a5, .LBB0_1
 ; RV32IA-NEXT:  .LBB0_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i8_monotonic_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sb a1, 7(sp)
+; RV64I-NEXT:    addi a1, sp, 7
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val monotonic monotonic
   ret void
 }
@@ -82,6 +97,19 @@ define void @cmpxchg_i8_acquire_monotoni
 ; RV32IA-NEXT:    bnez a5, .LBB1_1
 ; RV32IA-NEXT:  .LBB1_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i8_acquire_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sb a1, 7(sp)
+; RV64I-NEXT:    addi a1, sp, 7
+; RV64I-NEXT:    addi a3, zero, 2
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acquire monotonic
   ret void
 }
@@ -123,6 +151,19 @@ define void @cmpxchg_i8_acquire_acquire(
 ; RV32IA-NEXT:    bnez a5, .LBB2_1
 ; RV32IA-NEXT:  .LBB2_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i8_acquire_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sb a1, 7(sp)
+; RV64I-NEXT:    addi a1, sp, 7
+; RV64I-NEXT:    addi a3, zero, 2
+; RV64I-NEXT:    mv a4, a3
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acquire acquire
   ret void
 }
@@ -164,6 +205,19 @@ define void @cmpxchg_i8_release_monotoni
 ; RV32IA-NEXT:    bnez a5, .LBB3_1
 ; RV32IA-NEXT:  .LBB3_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i8_release_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sb a1, 7(sp)
+; RV64I-NEXT:    addi a1, sp, 7
+; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release monotonic
   ret void
 }
@@ -205,6 +259,19 @@ define void @cmpxchg_i8_release_acquire(
 ; RV32IA-NEXT:    bnez a5, .LBB4_1
 ; RV32IA-NEXT:  .LBB4_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i8_release_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sb a1, 7(sp)
+; RV64I-NEXT:    addi a1, sp, 7
+; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release acquire
   ret void
 }
@@ -246,6 +313,19 @@ define void @cmpxchg_i8_acq_rel_monotoni
 ; RV32IA-NEXT:    bnez a5, .LBB5_1
 ; RV32IA-NEXT:  .LBB5_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i8_acq_rel_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sb a1, 7(sp)
+; RV64I-NEXT:    addi a1, sp, 7
+; RV64I-NEXT:    addi a3, zero, 4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acq_rel monotonic
   ret void
 }
@@ -287,6 +367,19 @@ define void @cmpxchg_i8_acq_rel_acquire(
 ; RV32IA-NEXT:    bnez a5, .LBB6_1
 ; RV32IA-NEXT:  .LBB6_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i8_acq_rel_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sb a1, 7(sp)
+; RV64I-NEXT:    addi a1, sp, 7
+; RV64I-NEXT:    addi a3, zero, 4
+; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acq_rel acquire
   ret void
 }
@@ -328,6 +421,19 @@ define void @cmpxchg_i8_seq_cst_monotoni
 ; RV32IA-NEXT:    bnez a5, .LBB7_1
 ; RV32IA-NEXT:  .LBB7_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i8_seq_cst_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sb a1, 7(sp)
+; RV64I-NEXT:    addi a1, sp, 7
+; RV64I-NEXT:    addi a3, zero, 5
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst monotonic
   ret void
 }
@@ -369,6 +475,19 @@ define void @cmpxchg_i8_seq_cst_acquire(
 ; RV32IA-NEXT:    bnez a5, .LBB8_1
 ; RV32IA-NEXT:  .LBB8_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i8_seq_cst_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sb a1, 7(sp)
+; RV64I-NEXT:    addi a1, sp, 7
+; RV64I-NEXT:    addi a3, zero, 5
+; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst acquire
   ret void
 }
@@ -410,6 +529,19 @@ define void @cmpxchg_i8_seq_cst_seq_cst(
 ; RV32IA-NEXT:    bnez a5, .LBB9_1
 ; RV32IA-NEXT:  .LBB9_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i8_seq_cst_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sb a1, 7(sp)
+; RV64I-NEXT:    addi a1, sp, 7
+; RV64I-NEXT:    addi a3, zero, 5
+; RV64I-NEXT:    mv a4, a3
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst seq_cst
   ret void
 }
@@ -452,6 +584,19 @@ define void @cmpxchg_i16_monotonic_monot
 ; RV32IA-NEXT:    bnez a5, .LBB10_1
 ; RV32IA-NEXT:  .LBB10_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i16_monotonic_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sh a1, 6(sp)
+; RV64I-NEXT:    addi a1, sp, 6
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val monotonic monotonic
   ret void
 }
@@ -494,6 +639,19 @@ define void @cmpxchg_i16_acquire_monoton
 ; RV32IA-NEXT:    bnez a5, .LBB11_1
 ; RV32IA-NEXT:  .LBB11_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i16_acquire_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sh a1, 6(sp)
+; RV64I-NEXT:    addi a1, sp, 6
+; RV64I-NEXT:    addi a3, zero, 2
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acquire monotonic
   ret void
 }
@@ -536,6 +694,19 @@ define void @cmpxchg_i16_acquire_acquire
 ; RV32IA-NEXT:    bnez a5, .LBB12_1
 ; RV32IA-NEXT:  .LBB12_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i16_acquire_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sh a1, 6(sp)
+; RV64I-NEXT:    addi a1, sp, 6
+; RV64I-NEXT:    addi a3, zero, 2
+; RV64I-NEXT:    mv a4, a3
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acquire acquire
   ret void
 }
@@ -578,6 +749,19 @@ define void @cmpxchg_i16_release_monoton
 ; RV32IA-NEXT:    bnez a5, .LBB13_1
 ; RV32IA-NEXT:  .LBB13_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i16_release_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sh a1, 6(sp)
+; RV64I-NEXT:    addi a1, sp, 6
+; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release monotonic
   ret void
 }
@@ -620,6 +804,19 @@ define void @cmpxchg_i16_release_acquire
 ; RV32IA-NEXT:    bnez a5, .LBB14_1
 ; RV32IA-NEXT:  .LBB14_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i16_release_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sh a1, 6(sp)
+; RV64I-NEXT:    addi a1, sp, 6
+; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release acquire
   ret void
 }
@@ -662,6 +859,19 @@ define void @cmpxchg_i16_acq_rel_monoton
 ; RV32IA-NEXT:    bnez a5, .LBB15_1
 ; RV32IA-NEXT:  .LBB15_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i16_acq_rel_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sh a1, 6(sp)
+; RV64I-NEXT:    addi a1, sp, 6
+; RV64I-NEXT:    addi a3, zero, 4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acq_rel monotonic
   ret void
 }
@@ -704,6 +914,19 @@ define void @cmpxchg_i16_acq_rel_acquire
 ; RV32IA-NEXT:    bnez a5, .LBB16_1
 ; RV32IA-NEXT:  .LBB16_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i16_acq_rel_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sh a1, 6(sp)
+; RV64I-NEXT:    addi a1, sp, 6
+; RV64I-NEXT:    addi a3, zero, 4
+; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acq_rel acquire
   ret void
 }
@@ -746,6 +969,19 @@ define void @cmpxchg_i16_seq_cst_monoton
 ; RV32IA-NEXT:    bnez a5, .LBB17_1
 ; RV32IA-NEXT:  .LBB17_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i16_seq_cst_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sh a1, 6(sp)
+; RV64I-NEXT:    addi a1, sp, 6
+; RV64I-NEXT:    addi a3, zero, 5
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst monotonic
   ret void
 }
@@ -788,6 +1024,19 @@ define void @cmpxchg_i16_seq_cst_acquire
 ; RV32IA-NEXT:    bnez a5, .LBB18_1
 ; RV32IA-NEXT:  .LBB18_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i16_seq_cst_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sh a1, 6(sp)
+; RV64I-NEXT:    addi a1, sp, 6
+; RV64I-NEXT:    addi a3, zero, 5
+; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst acquire
   ret void
 }
@@ -830,6 +1079,19 @@ define void @cmpxchg_i16_seq_cst_seq_cst
 ; RV32IA-NEXT:    bnez a5, .LBB19_1
 ; RV32IA-NEXT:  .LBB19_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i16_seq_cst_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sh a1, 6(sp)
+; RV64I-NEXT:    addi a1, sp, 6
+; RV64I-NEXT:    addi a3, zero, 5
+; RV64I-NEXT:    mv a4, a3
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst seq_cst
   ret void
 }
@@ -858,6 +1120,19 @@ define void @cmpxchg_i32_monotonic_monot
 ; RV32IA-NEXT:    bnez a4, .LBB20_1
 ; RV32IA-NEXT:  .LBB20_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i32_monotonic_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sw a1, 4(sp)
+; RV64I-NEXT:    addi a1, sp, 4
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val monotonic monotonic
   ret void
 }
@@ -886,6 +1161,19 @@ define void @cmpxchg_i32_acquire_monoton
 ; RV32IA-NEXT:    bnez a4, .LBB21_1
 ; RV32IA-NEXT:  .LBB21_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i32_acquire_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sw a1, 4(sp)
+; RV64I-NEXT:    addi a1, sp, 4
+; RV64I-NEXT:    addi a3, zero, 2
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acquire monotonic
   ret void
 }
@@ -914,6 +1202,19 @@ define void @cmpxchg_i32_acquire_acquire
 ; RV32IA-NEXT:    bnez a4, .LBB22_1
 ; RV32IA-NEXT:  .LBB22_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i32_acquire_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sw a1, 4(sp)
+; RV64I-NEXT:    addi a1, sp, 4
+; RV64I-NEXT:    addi a3, zero, 2
+; RV64I-NEXT:    mv a4, a3
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acquire acquire
   ret void
 }
@@ -942,6 +1243,19 @@ define void @cmpxchg_i32_release_monoton
 ; RV32IA-NEXT:    bnez a4, .LBB23_1
 ; RV32IA-NEXT:  .LBB23_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i32_release_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sw a1, 4(sp)
+; RV64I-NEXT:    addi a1, sp, 4
+; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val release monotonic
   ret void
 }
@@ -970,6 +1284,19 @@ define void @cmpxchg_i32_release_acquire
 ; RV32IA-NEXT:    bnez a4, .LBB24_1
 ; RV32IA-NEXT:  .LBB24_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i32_release_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sw a1, 4(sp)
+; RV64I-NEXT:    addi a1, sp, 4
+; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val release acquire
   ret void
 }
@@ -998,6 +1325,19 @@ define void @cmpxchg_i32_acq_rel_monoton
 ; RV32IA-NEXT:    bnez a4, .LBB25_1
 ; RV32IA-NEXT:  .LBB25_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i32_acq_rel_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sw a1, 4(sp)
+; RV64I-NEXT:    addi a1, sp, 4
+; RV64I-NEXT:    addi a3, zero, 4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acq_rel monotonic
   ret void
 }
@@ -1026,6 +1366,19 @@ define void @cmpxchg_i32_acq_rel_acquire
 ; RV32IA-NEXT:    bnez a4, .LBB26_1
 ; RV32IA-NEXT:  .LBB26_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i32_acq_rel_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sw a1, 4(sp)
+; RV64I-NEXT:    addi a1, sp, 4
+; RV64I-NEXT:    addi a3, zero, 4
+; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acq_rel acquire
   ret void
 }
@@ -1054,6 +1407,19 @@ define void @cmpxchg_i32_seq_cst_monoton
 ; RV32IA-NEXT:    bnez a4, .LBB27_1
 ; RV32IA-NEXT:  .LBB27_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i32_seq_cst_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sw a1, 4(sp)
+; RV64I-NEXT:    addi a1, sp, 4
+; RV64I-NEXT:    addi a3, zero, 5
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst monotonic
   ret void
 }
@@ -1082,6 +1448,19 @@ define void @cmpxchg_i32_seq_cst_acquire
 ; RV32IA-NEXT:    bnez a4, .LBB28_1
 ; RV32IA-NEXT:  .LBB28_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i32_seq_cst_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sw a1, 4(sp)
+; RV64I-NEXT:    addi a1, sp, 4
+; RV64I-NEXT:    addi a3, zero, 5
+; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst acquire
   ret void
 }
@@ -1110,6 +1489,19 @@ define void @cmpxchg_i32_seq_cst_seq_cst
 ; RV32IA-NEXT:    bnez a4, .LBB29_1
 ; RV32IA-NEXT:  .LBB29_3:
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i32_seq_cst_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sw a1, 4(sp)
+; RV64I-NEXT:    addi a1, sp, 4
+; RV64I-NEXT:    addi a3, zero, 5
+; RV64I-NEXT:    mv a4, a3
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst seq_cst
   ret void
 }
@@ -1146,6 +1538,19 @@ define void @cmpxchg_i64_monotonic_monot
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i64_monotonic_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd a1, 0(sp)
+; RV64I-NEXT:    mv a1, sp
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val monotonic monotonic
   ret void
 }
@@ -1184,6 +1589,19 @@ define void @cmpxchg_i64_acquire_monoton
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i64_acquire_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd a1, 0(sp)
+; RV64I-NEXT:    mv a1, sp
+; RV64I-NEXT:    addi a3, zero, 2
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acquire monotonic
   ret void
 }
@@ -1220,6 +1638,19 @@ define void @cmpxchg_i64_acquire_acquire
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i64_acquire_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd a1, 0(sp)
+; RV64I-NEXT:    mv a1, sp
+; RV64I-NEXT:    addi a3, zero, 2
+; RV64I-NEXT:    mv a4, a3
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acquire acquire
   ret void
 }
@@ -1258,6 +1689,19 @@ define void @cmpxchg_i64_release_monoton
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i64_release_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd a1, 0(sp)
+; RV64I-NEXT:    mv a1, sp
+; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val release monotonic
   ret void
 }
@@ -1296,6 +1740,19 @@ define void @cmpxchg_i64_release_acquire
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i64_release_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd a1, 0(sp)
+; RV64I-NEXT:    mv a1, sp
+; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val release acquire
   ret void
 }
@@ -1334,6 +1791,19 @@ define void @cmpxchg_i64_acq_rel_monoton
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i64_acq_rel_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd a1, 0(sp)
+; RV64I-NEXT:    mv a1, sp
+; RV64I-NEXT:    addi a3, zero, 4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acq_rel monotonic
   ret void
 }
@@ -1372,6 +1842,19 @@ define void @cmpxchg_i64_acq_rel_acquire
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i64_acq_rel_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd a1, 0(sp)
+; RV64I-NEXT:    mv a1, sp
+; RV64I-NEXT:    addi a3, zero, 4
+; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acq_rel acquire
   ret void
 }
@@ -1410,6 +1893,19 @@ define void @cmpxchg_i64_seq_cst_monoton
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i64_seq_cst_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd a1, 0(sp)
+; RV64I-NEXT:    mv a1, sp
+; RV64I-NEXT:    addi a3, zero, 5
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst monotonic
   ret void
 }
@@ -1448,6 +1944,19 @@ define void @cmpxchg_i64_seq_cst_acquire
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i64_seq_cst_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd a1, 0(sp)
+; RV64I-NEXT:    mv a1, sp
+; RV64I-NEXT:    addi a3, zero, 5
+; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst acquire
   ret void
 }
@@ -1484,6 +1993,19 @@ define void @cmpxchg_i64_seq_cst_seq_cst
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: cmpxchg_i64_seq_cst_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd a1, 0(sp)
+; RV64I-NEXT:    mv a1, sp
+; RV64I-NEXT:    addi a3, zero, 5
+; RV64I-NEXT:    mv a4, a3
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst seq_cst
   ret void
 }

Modified: llvm/trunk/test/CodeGen/RISCV/atomic-fence.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/atomic-fence.ll?rev=350962&r1=350961&r2=350962&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/atomic-fence.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/atomic-fence.ll Fri Jan 11 11:46:48 2019
@@ -3,12 +3,21 @@
 ; RUN:   | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
+; RUN: llc -mtriple=riscv64 -mattr=+a -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
 
 define void @fence_acquire() nounwind {
 ; RV32I-LABEL: fence_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    fence r, rw
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fence_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    fence r, rw
+; RV64I-NEXT:    ret
   fence acquire
   ret void
 }
@@ -18,6 +27,11 @@ define void @fence_release() nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    fence rw, w
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fence_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    fence rw, w
+; RV64I-NEXT:    ret
   fence release
   ret void
 }
@@ -27,6 +41,11 @@ define void @fence_acq_rel() nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    fence.tso
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fence_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    fence.tso
+; RV64I-NEXT:    ret
   fence acq_rel
   ret void
 }
@@ -36,6 +55,11 @@ define void @fence_seq_cst() nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    fence rw, rw
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fence_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    fence rw, rw
+; RV64I-NEXT:    ret
   fence seq_cst
   ret void
 }

Modified: llvm/trunk/test/CodeGen/RISCV/atomic-load-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/atomic-load-store.ll?rev=350962&r1=350961&r2=350962&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/atomic-load-store.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/atomic-load-store.ll Fri Jan 11 11:46:48 2019
@@ -3,6 +3,8 @@
 ; RUN:   | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32IA %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
 
 define i8 @atomic_load_i8_unordered(i8 *%a) nounwind {
 ; RV32I-LABEL: atomic_load_i8_unordered:
@@ -19,6 +21,16 @@ define i8 @atomic_load_i8_unordered(i8 *
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    lb a0, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i8_unordered:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    call __atomic_load_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i8, i8* %a unordered, align 1
   ret i8 %1
 }
@@ -38,6 +50,16 @@ define i8 @atomic_load_i8_monotonic(i8 *
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    lb a0, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i8_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    call __atomic_load_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i8, i8* %a monotonic, align 1
   ret i8 %1
 }
@@ -58,6 +80,16 @@ define i8 @atomic_load_i8_acquire(i8 *%a
 ; RV32IA-NEXT:    lb a0, 0(a0)
 ; RV32IA-NEXT:    fence r, rw
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i8_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a1, zero, 2
+; RV64I-NEXT:    call __atomic_load_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i8, i8* %a acquire, align 1
   ret i8 %1
 }
@@ -79,6 +111,16 @@ define i8 @atomic_load_i8_seq_cst(i8 *%a
 ; RV32IA-NEXT:    lb a0, 0(a0)
 ; RV32IA-NEXT:    fence r, rw
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i8_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a1, zero, 5
+; RV64I-NEXT:    call __atomic_load_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i8, i8* %a seq_cst, align 1
   ret i8 %1
 }
@@ -98,6 +140,16 @@ define i16 @atomic_load_i16_unordered(i1
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    lh a0, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i16_unordered:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    call __atomic_load_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i16, i16* %a unordered, align 2
   ret i16 %1
 }
@@ -117,6 +169,16 @@ define i16 @atomic_load_i16_monotonic(i1
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    lh a0, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i16_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    call __atomic_load_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i16, i16* %a monotonic, align 2
   ret i16 %1
 }
@@ -137,6 +199,16 @@ define i16 @atomic_load_i16_acquire(i16
 ; RV32IA-NEXT:    lh a0, 0(a0)
 ; RV32IA-NEXT:    fence r, rw
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i16_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a1, zero, 2
+; RV64I-NEXT:    call __atomic_load_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i16, i16* %a acquire, align 2
   ret i16 %1
 }
@@ -158,6 +230,16 @@ define i16 @atomic_load_i16_seq_cst(i16
 ; RV32IA-NEXT:    lh a0, 0(a0)
 ; RV32IA-NEXT:    fence r, rw
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i16_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a1, zero, 5
+; RV64I-NEXT:    call __atomic_load_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i16, i16* %a seq_cst, align 2
   ret i16 %1
 }
@@ -177,6 +259,16 @@ define i32 @atomic_load_i32_unordered(i3
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    lw a0, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i32_unordered:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    call __atomic_load_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i32, i32* %a unordered, align 4
   ret i32 %1
 }
@@ -196,6 +288,16 @@ define i32 @atomic_load_i32_monotonic(i3
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    lw a0, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i32_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    call __atomic_load_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i32, i32* %a monotonic, align 4
   ret i32 %1
 }
@@ -216,6 +318,16 @@ define i32 @atomic_load_i32_acquire(i32
 ; RV32IA-NEXT:    lw a0, 0(a0)
 ; RV32IA-NEXT:    fence r, rw
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i32_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a1, zero, 2
+; RV64I-NEXT:    call __atomic_load_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i32, i32* %a acquire, align 4
   ret i32 %1
 }
@@ -237,6 +349,16 @@ define i32 @atomic_load_i32_seq_cst(i32
 ; RV32IA-NEXT:    lw a0, 0(a0)
 ; RV32IA-NEXT:    fence r, rw
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i32_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a1, zero, 5
+; RV64I-NEXT:    call __atomic_load_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i32, i32* %a seq_cst, align 4
   ret i32 %1
 }
@@ -261,6 +383,16 @@ define i64 @atomic_load_i64_unordered(i6
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i64_unordered:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    call __atomic_load_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i64, i64* %a unordered, align 8
   ret i64 %1
 }
@@ -285,6 +417,16 @@ define i64 @atomic_load_i64_monotonic(i6
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i64_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    call __atomic_load_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i64, i64* %a monotonic, align 8
   ret i64 %1
 }
@@ -309,6 +451,16 @@ define i64 @atomic_load_i64_acquire(i64
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i64_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a1, zero, 2
+; RV64I-NEXT:    call __atomic_load_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i64, i64* %a acquire, align 8
   ret i64 %1
 }
@@ -333,6 +485,16 @@ define i64 @atomic_load_i64_seq_cst(i64
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_load_i64_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a1, zero, 5
+; RV64I-NEXT:    call __atomic_load_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = load atomic i64, i64* %a seq_cst, align 8
   ret i64 %1
 }
@@ -352,6 +514,16 @@ define void @atomic_store_i8_unordered(i
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    sb a1, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i8_unordered:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_store_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i8 %b, i8* %a unordered, align 1
   ret void
 }
@@ -371,6 +543,16 @@ define void @atomic_store_i8_monotonic(i
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    sb a1, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i8_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_store_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i8 %b, i8* %a monotonic, align 1
   ret void
 }
@@ -391,6 +573,16 @@ define void @atomic_store_i8_release(i8
 ; RV32IA-NEXT:    fence rw, w
 ; RV32IA-NEXT:    sb a1, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i8_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_store_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i8 %b, i8* %a release, align 1
   ret void
 }
@@ -411,6 +603,16 @@ define void @atomic_store_i8_seq_cst(i8
 ; RV32IA-NEXT:    fence rw, w
 ; RV32IA-NEXT:    sb a1, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i8_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_store_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i8 %b, i8* %a seq_cst, align 1
   ret void
 }
@@ -430,6 +632,16 @@ define void @atomic_store_i16_unordered(
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    sh a1, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i16_unordered:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_store_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i16 %b, i16* %a unordered, align 2
   ret void
 }
@@ -449,6 +661,16 @@ define void @atomic_store_i16_monotonic(
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    sh a1, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i16_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_store_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i16 %b, i16* %a monotonic, align 2
   ret void
 }
@@ -469,6 +691,16 @@ define void @atomic_store_i16_release(i1
 ; RV32IA-NEXT:    fence rw, w
 ; RV32IA-NEXT:    sh a1, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i16_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_store_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i16 %b, i16* %a release, align 2
   ret void
 }
@@ -489,6 +721,16 @@ define void @atomic_store_i16_seq_cst(i1
 ; RV32IA-NEXT:    fence rw, w
 ; RV32IA-NEXT:    sh a1, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i16_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_store_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i16 %b, i16* %a seq_cst, align 2
   ret void
 }
@@ -508,6 +750,16 @@ define void @atomic_store_i32_unordered(
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    sw a1, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i32_unordered:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_store_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i32 %b, i32* %a unordered, align 4
   ret void
 }
@@ -527,6 +779,16 @@ define void @atomic_store_i32_monotonic(
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    sw a1, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i32_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_store_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i32 %b, i32* %a monotonic, align 4
   ret void
 }
@@ -547,6 +809,16 @@ define void @atomic_store_i32_release(i3
 ; RV32IA-NEXT:    fence rw, w
 ; RV32IA-NEXT:    sw a1, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i32_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_store_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i32 %b, i32* %a release, align 4
   ret void
 }
@@ -567,6 +839,16 @@ define void @atomic_store_i32_seq_cst(i3
 ; RV32IA-NEXT:    fence rw, w
 ; RV32IA-NEXT:    sw a1, 0(a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i32_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_store_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i32 %b, i32* %a seq_cst, align 4
   ret void
 }
@@ -591,6 +873,16 @@ define void @atomic_store_i64_unordered(
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i64_unordered:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_store_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i64 %b, i64* %a unordered, align 8
   ret void
 }
@@ -615,6 +907,16 @@ define void @atomic_store_i64_monotonic(
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i64_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_store_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i64 %b, i64* %a monotonic, align 8
   ret void
 }
@@ -639,6 +941,16 @@ define void @atomic_store_i64_release(i6
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i64_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_store_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i64 %b, i64* %a release, align 8
   ret void
 }
@@ -663,6 +975,16 @@ define void @atomic_store_i64_seq_cst(i6
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomic_store_i64_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_store_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   store atomic i64 %b, i64* %a seq_cst, align 8
   ret void
 }

Modified: llvm/trunk/test/CodeGen/RISCV/atomic-rmw.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/atomic-rmw.ll?rev=350962&r1=350961&r2=350962&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/atomic-rmw.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/atomic-rmw.ll Fri Jan 11 11:46:48 2019
@@ -3,6 +3,8 @@
 ; RUN:   | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32IA %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
 
 define i8 @atomicrmw_xchg_i8_monotonic(i8* %a, i8 %b) {
 ; RV32I-LABEL: atomicrmw_xchg_i8_monotonic:
@@ -35,6 +37,16 @@ define i8 @atomicrmw_xchg_i8_monotonic(i
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i8_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_exchange_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -70,6 +82,16 @@ define i8 @atomicrmw_xchg_i8_acquire(i8*
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i8_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_exchange_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -105,6 +127,16 @@ define i8 @atomicrmw_xchg_i8_release(i8*
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i8_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_exchange_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i8* %a, i8 %b release
   ret i8 %1
 }
@@ -140,6 +172,16 @@ define i8 @atomicrmw_xchg_i8_acq_rel(i8*
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i8_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_exchange_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -175,6 +217,16 @@ define i8 @atomicrmw_xchg_i8_seq_cst(i8*
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i8_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_exchange_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -210,6 +262,16 @@ define i8 @atomicrmw_add_i8_monotonic(i8
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i8_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_add_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -245,6 +307,16 @@ define i8 @atomicrmw_add_i8_acquire(i8 *
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i8_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_add_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -280,6 +352,16 @@ define i8 @atomicrmw_add_i8_release(i8 *
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i8_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_add_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i8* %a, i8 %b release
   ret i8 %1
 }
@@ -315,6 +397,16 @@ define i8 @atomicrmw_add_i8_acq_rel(i8 *
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i8_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_add_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -350,6 +442,16 @@ define i8 @atomicrmw_add_i8_seq_cst(i8 *
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i8_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_add_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -385,6 +487,16 @@ define i8 @atomicrmw_sub_i8_monotonic(i8
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i8_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_sub_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -420,6 +532,16 @@ define i8 @atomicrmw_sub_i8_acquire(i8*
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i8_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_sub_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -455,6 +577,16 @@ define i8 @atomicrmw_sub_i8_release(i8*
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i8_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_sub_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i8* %a, i8 %b release
   ret i8 %1
 }
@@ -490,6 +622,16 @@ define i8 @atomicrmw_sub_i8_acq_rel(i8*
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i8_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_sub_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -525,6 +667,16 @@ define i8 @atomicrmw_sub_i8_seq_cst(i8*
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i8_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_sub_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -554,6 +706,16 @@ define i8 @atomicrmw_and_i8_monotonic(i8
 ; RV32IA-NEXT:    amoand.w a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i8_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_and_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -583,6 +745,16 @@ define i8 @atomicrmw_and_i8_acquire(i8 *
 ; RV32IA-NEXT:    amoand.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i8_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_and_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -612,6 +784,16 @@ define i8 @atomicrmw_and_i8_release(i8 *
 ; RV32IA-NEXT:    amoand.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i8_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_and_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i8* %a, i8 %b release
   ret i8 %1
 }
@@ -641,6 +823,16 @@ define i8 @atomicrmw_and_i8_acq_rel(i8 *
 ; RV32IA-NEXT:    amoand.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i8_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_and_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -670,6 +862,16 @@ define i8 @atomicrmw_and_i8_seq_cst(i8 *
 ; RV32IA-NEXT:    amoand.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i8_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_and_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -706,6 +908,16 @@ define i8 @atomicrmw_nand_i8_monotonic(i
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i8_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_nand_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -742,6 +954,16 @@ define i8 @atomicrmw_nand_i8_acquire(i8*
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i8_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_nand_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -778,6 +1000,16 @@ define i8 @atomicrmw_nand_i8_release(i8*
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i8_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_nand_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i8* %a, i8 %b release
   ret i8 %1
 }
@@ -814,6 +1046,16 @@ define i8 @atomicrmw_nand_i8_acq_rel(i8*
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i8_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_nand_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -850,6 +1092,16 @@ define i8 @atomicrmw_nand_i8_seq_cst(i8*
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i8_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_nand_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -875,6 +1127,16 @@ define i8 @atomicrmw_or_i8_monotonic(i8
 ; RV32IA-NEXT:    amoor.w a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i8_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_or_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -900,6 +1162,16 @@ define i8 @atomicrmw_or_i8_acquire(i8 *%
 ; RV32IA-NEXT:    amoor.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i8_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_or_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -925,6 +1197,16 @@ define i8 @atomicrmw_or_i8_release(i8 *%
 ; RV32IA-NEXT:    amoor.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i8_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_or_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i8* %a, i8 %b release
   ret i8 %1
 }
@@ -950,6 +1232,16 @@ define i8 @atomicrmw_or_i8_acq_rel(i8 *%
 ; RV32IA-NEXT:    amoor.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i8_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_or_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -975,6 +1267,16 @@ define i8 @atomicrmw_or_i8_seq_cst(i8 *%
 ; RV32IA-NEXT:    amoor.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i8_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_or_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -1000,6 +1302,16 @@ define i8 @atomicrmw_xor_i8_monotonic(i8
 ; RV32IA-NEXT:    amoxor.w a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i8_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_xor_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -1025,6 +1337,16 @@ define i8 @atomicrmw_xor_i8_acquire(i8 *
 ; RV32IA-NEXT:    amoxor.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i8_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_xor_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -1050,6 +1372,16 @@ define i8 @atomicrmw_xor_i8_release(i8 *
 ; RV32IA-NEXT:    amoxor.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i8_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_xor_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i8* %a, i8 %b release
   ret i8 %1
 }
@@ -1075,6 +1407,16 @@ define i8 @atomicrmw_xor_i8_acq_rel(i8 *
 ; RV32IA-NEXT:    amoxor.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i8_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_xor_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -1100,6 +1442,16 @@ define i8 @atomicrmw_xor_i8_seq_cst(i8 *
 ; RV32IA-NEXT:    amoxor.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i8_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_xor_1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -1177,6 +1529,49 @@ define i8 @atomicrmw_max_i8_monotonic(i8
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i8_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai s1, a1, 56
+; RV64I-NEXT:    addi s3, sp, 7
+; RV64I-NEXT:  .LBB35_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    blt s1, a1, .LBB35_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB35_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB35_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB35_1 Depth=1
+; RV64I-NEXT:    sb a0, 7(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 7(sp)
+; RV64I-NEXT:    beqz a1, .LBB35_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i8* %a, i8 %b monotonic
   ret i8 %1
 }
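
libatomic has no fetch-and-max entry point, so on RV64I the signed min/max operations expand to a compare-exchange loop: the loaded byte and the operand are sign-extended with the slli/srai-by-56 pair, blt/bge selects the value to store, and __atomic_compare_exchange_1 retries until it succeeds. A hedged C sketch of the monotonic max loop above (the declaration follows the five-argument libcall form visible in the a0-a4 register setup; names are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    /* Five arguments, matching a0-a4 above: pointer, expected slot,
       desired value, success order, failure order.  On failure the
       expected slot is updated with the value observed in memory. */
    extern bool __atomic_compare_exchange_1(volatile void *ptr,
                                            uint8_t *expected,
                                            uint8_t desired,
                                            int success, int failure);

    uint8_t max_i8_monotonic(volatile uint8_t *a, uint8_t b) {
        uint8_t old = *a;                      /* initial lbu */
        for (;;) {
            /* blt on the sign-extended bytes picks the larger value. */
            uint8_t desired = ((int8_t)old > (int8_t)b) ? old : b;
            uint8_t expected = old;
            if (__atomic_compare_exchange_1(a, &expected, desired, 0, 0))
                return old;                    /* success: return old value */
            old = expected;                    /* failure: retry with fresh value */
        }
    }
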
@@ -1257,6 +1652,52 @@ define i8 @atomicrmw_max_i8_acquire(i8 *
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i8_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai s5, a1, 56
+; RV64I-NEXT:    addi s3, sp, 15
+; RV64I-NEXT:    addi s1, zero, 2
+; RV64I-NEXT:  .LBB36_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    blt s5, a1, .LBB36_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB36_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB36_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB36_1 Depth=1
+; RV64I-NEXT:    sb a0, 15(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 15(sp)
+; RV64I-NEXT:    beqz a1, .LBB36_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -1337,6 +1778,52 @@ define i8 @atomicrmw_max_i8_release(i8 *
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i8_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s5, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai s1, a1, 56
+; RV64I-NEXT:    addi s3, sp, 15
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB37_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    blt s1, a1, .LBB37_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB37_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB37_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB37_1 Depth=1
+; RV64I-NEXT:    sb a0, 15(sp)
+; RV64I-NEXT:    mv a0, s5
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 15(sp)
+; RV64I-NEXT:    beqz a1, .LBB37_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i8* %a, i8 %b release
   ret i8 %1
 }
@@ -1420,6 +1907,55 @@ define i8 @atomicrmw_max_i8_acq_rel(i8 *
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i8_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s6, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai s1, a1, 56
+; RV64I-NEXT:    addi s3, sp, 7
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB38_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    blt s1, a1, .LBB38_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB38_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB38_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB38_1 Depth=1
+; RV64I-NEXT:    sb a0, 7(sp)
+; RV64I-NEXT:    mv a0, s6
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 7(sp)
+; RV64I-NEXT:    beqz a1, .LBB38_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -1500,6 +2036,52 @@ define i8 @atomicrmw_max_i8_seq_cst(i8 *
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i8_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai s5, a1, 56
+; RV64I-NEXT:    addi s3, sp, 15
+; RV64I-NEXT:    addi s1, zero, 5
+; RV64I-NEXT:  .LBB39_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    blt s5, a1, .LBB39_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB39_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB39_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB39_1 Depth=1
+; RV64I-NEXT:    sb a0, 15(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 15(sp)
+; RV64I-NEXT:    beqz a1, .LBB39_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -1577,6 +2159,49 @@ define i8 @atomicrmw_min_i8_monotonic(i8
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i8_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai s1, a1, 56
+; RV64I-NEXT:    addi s3, sp, 7
+; RV64I-NEXT:  .LBB40_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge s1, a1, .LBB40_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB40_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB40_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB40_1 Depth=1
+; RV64I-NEXT:    sb a0, 7(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 7(sp)
+; RV64I-NEXT:    beqz a1, .LBB40_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -1657,6 +2282,52 @@ define i8 @atomicrmw_min_i8_acquire(i8 *
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i8_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai s5, a1, 56
+; RV64I-NEXT:    addi s3, sp, 15
+; RV64I-NEXT:    addi s1, zero, 2
+; RV64I-NEXT:  .LBB41_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge s5, a1, .LBB41_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB41_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB41_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB41_1 Depth=1
+; RV64I-NEXT:    sb a0, 15(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 15(sp)
+; RV64I-NEXT:    beqz a1, .LBB41_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -1737,6 +2408,52 @@ define i8 @atomicrmw_min_i8_release(i8 *
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i8_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s5, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai s1, a1, 56
+; RV64I-NEXT:    addi s3, sp, 15
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB42_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge s1, a1, .LBB42_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB42_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB42_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB42_1 Depth=1
+; RV64I-NEXT:    sb a0, 15(sp)
+; RV64I-NEXT:    mv a0, s5
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 15(sp)
+; RV64I-NEXT:    beqz a1, .LBB42_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i8* %a, i8 %b release
   ret i8 %1
 }
@@ -1820,6 +2537,55 @@ define i8 @atomicrmw_min_i8_acq_rel(i8 *
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i8_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s6, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai s1, a1, 56
+; RV64I-NEXT:    addi s3, sp, 7
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB43_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge s1, a1, .LBB43_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB43_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB43_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB43_1 Depth=1
+; RV64I-NEXT:    sb a0, 7(sp)
+; RV64I-NEXT:    mv a0, s6
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 7(sp)
+; RV64I-NEXT:    beqz a1, .LBB43_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -1900,6 +2666,52 @@ define i8 @atomicrmw_min_i8_seq_cst(i8 *
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i8_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai s5, a1, 56
+; RV64I-NEXT:    addi s3, sp, 15
+; RV64I-NEXT:    addi s1, zero, 5
+; RV64I-NEXT:  .LBB44_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge s5, a1, .LBB44_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB44_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB44_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB44_1 Depth=1
+; RV64I-NEXT:    sb a0, 15(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 15(sp)
+; RV64I-NEXT:    beqz a1, .LBB44_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -1970,6 +2782,47 @@ define i8 @atomicrmw_umax_i8_monotonic(i
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i8_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    andi s1, a1, 255
+; RV64I-NEXT:    addi s3, sp, 7
+; RV64I-NEXT:  .LBB45_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    andi a1, a0, 255
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu s1, a1, .LBB45_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB45_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB45_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB45_1 Depth=1
+; RV64I-NEXT:    sb a0, 7(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 7(sp)
+; RV64I-NEXT:    beqz a1, .LBB45_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i8* %a, i8 %b monotonic
   ret i8 %1
 }
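
The unsigned umax/umin expansions reuse the same loop shape but zero-extend with andi 255 and compare with bltu/bgeu. Sketching the monotonic umax above under the same assumed libcall declaration:

    #include <stdbool.h>
    #include <stdint.h>

    extern bool __atomic_compare_exchange_1(volatile void *ptr,
                                            uint8_t *expected,
                                            uint8_t desired,
                                            int success, int failure);

    uint8_t umax_i8_monotonic(volatile uint8_t *a, uint8_t b) {
        uint8_t old = *a;
        for (;;) {
            /* bltu on the andi-masked (zero-extended) bytes. */
            uint8_t desired = (old > b) ? old : b;
            uint8_t expected = old;
            if (__atomic_compare_exchange_1(a, &expected, desired, 0, 0))
                return old;
            old = expected;
        }
    }
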
@@ -2043,6 +2896,50 @@ define i8 @atomicrmw_umax_i8_acquire(i8
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i8_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    andi s5, a1, 255
+; RV64I-NEXT:    addi s3, sp, 15
+; RV64I-NEXT:    addi s1, zero, 2
+; RV64I-NEXT:  .LBB46_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    andi a1, a0, 255
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu s5, a1, .LBB46_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB46_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB46_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB46_1 Depth=1
+; RV64I-NEXT:    sb a0, 15(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 15(sp)
+; RV64I-NEXT:    beqz a1, .LBB46_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -2116,6 +3013,50 @@ define i8 @atomicrmw_umax_i8_release(i8
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i8_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s5, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    andi s1, a1, 255
+; RV64I-NEXT:    addi s3, sp, 15
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB47_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    andi a1, a0, 255
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu s1, a1, .LBB47_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB47_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB47_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB47_1 Depth=1
+; RV64I-NEXT:    sb a0, 15(sp)
+; RV64I-NEXT:    mv a0, s5
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 15(sp)
+; RV64I-NEXT:    beqz a1, .LBB47_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i8* %a, i8 %b release
   ret i8 %1
 }
@@ -2192,6 +3133,53 @@ define i8 @atomicrmw_umax_i8_acq_rel(i8
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i8_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s6, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    andi s1, a1, 255
+; RV64I-NEXT:    addi s3, sp, 7
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB48_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    andi a1, a0, 255
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu s1, a1, .LBB48_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB48_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB48_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB48_1 Depth=1
+; RV64I-NEXT:    sb a0, 7(sp)
+; RV64I-NEXT:    mv a0, s6
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 7(sp)
+; RV64I-NEXT:    beqz a1, .LBB48_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -2265,6 +3253,50 @@ define i8 @atomicrmw_umax_i8_seq_cst(i8
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i8_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    andi s5, a1, 255
+; RV64I-NEXT:    addi s3, sp, 15
+; RV64I-NEXT:    addi s1, zero, 5
+; RV64I-NEXT:  .LBB49_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    andi a1, a0, 255
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu s5, a1, .LBB49_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB49_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB49_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB49_1 Depth=1
+; RV64I-NEXT:    sb a0, 15(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 15(sp)
+; RV64I-NEXT:    beqz a1, .LBB49_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -2335,6 +3367,47 @@ define i8 @atomicrmw_umin_i8_monotonic(i
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i8_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    andi s1, a1, 255
+; RV64I-NEXT:    addi s3, sp, 7
+; RV64I-NEXT:  .LBB50_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    andi a1, a0, 255
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu s1, a1, .LBB50_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB50_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB50_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB50_1 Depth=1
+; RV64I-NEXT:    sb a0, 7(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 7(sp)
+; RV64I-NEXT:    beqz a1, .LBB50_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -2408,6 +3481,50 @@ define i8 @atomicrmw_umin_i8_acquire(i8
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i8_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    andi s5, a1, 255
+; RV64I-NEXT:    addi s3, sp, 15
+; RV64I-NEXT:    addi s1, zero, 2
+; RV64I-NEXT:  .LBB51_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    andi a1, a0, 255
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu s5, a1, .LBB51_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB51_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB51_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB51_1 Depth=1
+; RV64I-NEXT:    sb a0, 15(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 15(sp)
+; RV64I-NEXT:    beqz a1, .LBB51_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -2481,6 +3598,50 @@ define i8 @atomicrmw_umin_i8_release(i8
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i8_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s5, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    andi s1, a1, 255
+; RV64I-NEXT:    addi s3, sp, 15
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB52_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    andi a1, a0, 255
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu s1, a1, .LBB52_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB52_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB52_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB52_1 Depth=1
+; RV64I-NEXT:    sb a0, 15(sp)
+; RV64I-NEXT:    mv a0, s5
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 15(sp)
+; RV64I-NEXT:    beqz a1, .LBB52_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i8* %a, i8 %b release
   ret i8 %1
 }
@@ -2557,6 +3718,53 @@ define i8 @atomicrmw_umin_i8_acq_rel(i8
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i8_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s6, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    andi s1, a1, 255
+; RV64I-NEXT:    addi s3, sp, 7
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB53_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    andi a1, a0, 255
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu s1, a1, .LBB53_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB53_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB53_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB53_1 Depth=1
+; RV64I-NEXT:    sb a0, 7(sp)
+; RV64I-NEXT:    mv a0, s6
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 7(sp)
+; RV64I-NEXT:    beqz a1, .LBB53_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -2630,6 +3838,50 @@ define i8 @atomicrmw_umin_i8_seq_cst(i8
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i8_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    andi s5, a1, 255
+; RV64I-NEXT:    addi s3, sp, 15
+; RV64I-NEXT:    addi s1, zero, 5
+; RV64I-NEXT:  .LBB54_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    andi a1, a0, 255
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu s5, a1, .LBB54_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB54_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB54_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB54_1 Depth=1
+; RV64I-NEXT:    sb a0, 15(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lb a0, 15(sp)
+; RV64I-NEXT:    beqz a1, .LBB54_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -2666,6 +3918,16 @@ define i16 @atomicrmw_xchg_i16_monotonic
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i16_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_exchange_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -2702,6 +3964,16 @@ define i16 @atomicrmw_xchg_i16_acquire(i
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i16_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_exchange_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -2738,6 +4010,16 @@ define i16 @atomicrmw_xchg_i16_release(i
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i16_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_exchange_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i16* %a, i16 %b release
   ret i16 %1
 }
@@ -2774,6 +4056,16 @@ define i16 @atomicrmw_xchg_i16_acq_rel(i
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i16_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_exchange_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -2810,6 +4102,16 @@ define i16 @atomicrmw_xchg_i16_seq_cst(i
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i16_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_exchange_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -2846,6 +4148,16 @@ define i16 @atomicrmw_add_i16_monotonic(
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i16_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_add_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -2882,6 +4194,16 @@ define i16 @atomicrmw_add_i16_acquire(i1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i16_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_add_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -2918,6 +4240,16 @@ define i16 @atomicrmw_add_i16_release(i1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i16_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_add_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i16* %a, i16 %b release
   ret i16 %1
 }
@@ -2954,6 +4286,16 @@ define i16 @atomicrmw_add_i16_acq_rel(i1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i16_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_add_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -2990,6 +4332,16 @@ define i16 @atomicrmw_add_i16_seq_cst(i1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i16_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_add_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -3026,6 +4378,16 @@ define i16 @atomicrmw_sub_i16_monotonic(
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i16_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_sub_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -3062,6 +4424,16 @@ define i16 @atomicrmw_sub_i16_acquire(i1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i16_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_sub_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -3098,6 +4470,16 @@ define i16 @atomicrmw_sub_i16_release(i1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i16_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_sub_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i16* %a, i16 %b release
   ret i16 %1
 }
@@ -3134,6 +4516,16 @@ define i16 @atomicrmw_sub_i16_acq_rel(i1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i16_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_sub_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -3170,6 +4562,16 @@ define i16 @atomicrmw_sub_i16_seq_cst(i1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i16_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_sub_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -3200,6 +4602,16 @@ define i16 @atomicrmw_and_i16_monotonic(
 ; RV32IA-NEXT:    amoand.w a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i16_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_and_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -3230,6 +4642,16 @@ define i16 @atomicrmw_and_i16_acquire(i1
 ; RV32IA-NEXT:    amoand.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i16_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_and_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -3260,6 +4682,16 @@ define i16 @atomicrmw_and_i16_release(i1
 ; RV32IA-NEXT:    amoand.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i16_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_and_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i16* %a, i16 %b release
   ret i16 %1
 }
@@ -3290,6 +4722,16 @@ define i16 @atomicrmw_and_i16_acq_rel(i1
 ; RV32IA-NEXT:    amoand.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i16_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_and_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -3320,6 +4762,16 @@ define i16 @atomicrmw_and_i16_seq_cst(i1
 ; RV32IA-NEXT:    amoand.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i16_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_and_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -3357,6 +4809,16 @@ define i16 @atomicrmw_nand_i16_monotonic
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i16_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_nand_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -3394,6 +4856,16 @@ define i16 @atomicrmw_nand_i16_acquire(i
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i16_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_nand_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -3431,6 +4903,16 @@ define i16 @atomicrmw_nand_i16_release(i
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i16_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_nand_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i16* %a, i16 %b release
   ret i16 %1
 }
@@ -3468,6 +4950,16 @@ define i16 @atomicrmw_nand_i16_acq_rel(i
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i16_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_nand_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -3505,6 +4997,16 @@ define i16 @atomicrmw_nand_i16_seq_cst(i
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i16_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_nand_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -3532,6 +5034,16 @@ define i16 @atomicrmw_or_i16_monotonic(i
 ; RV32IA-NEXT:    amoor.w a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i16_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_or_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -3559,6 +5071,16 @@ define i16 @atomicrmw_or_i16_acquire(i16
 ; RV32IA-NEXT:    amoor.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i16_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_or_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -3586,6 +5108,16 @@ define i16 @atomicrmw_or_i16_release(i16
 ; RV32IA-NEXT:    amoor.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i16_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_or_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i16* %a, i16 %b release
   ret i16 %1
 }
@@ -3613,6 +5145,16 @@ define i16 @atomicrmw_or_i16_acq_rel(i16
 ; RV32IA-NEXT:    amoor.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i16_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_or_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -3640,6 +5182,16 @@ define i16 @atomicrmw_or_i16_seq_cst(i16
 ; RV32IA-NEXT:    amoor.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i16_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_or_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -3667,6 +5219,16 @@ define i16 @atomicrmw_xor_i16_monotonic(
 ; RV32IA-NEXT:    amoxor.w a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i16_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_xor_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -3694,6 +5256,16 @@ define i16 @atomicrmw_xor_i16_acquire(i1
 ; RV32IA-NEXT:    amoxor.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i16_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_xor_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -3721,6 +5293,16 @@ define i16 @atomicrmw_xor_i16_release(i1
 ; RV32IA-NEXT:    amoxor.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i16_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_xor_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i16* %a, i16 %b release
   ret i16 %1
 }
@@ -3748,6 +5330,16 @@ define i16 @atomicrmw_xor_i16_acq_rel(i1
 ; RV32IA-NEXT:    amoxor.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i16_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_xor_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -3775,6 +5367,16 @@ define i16 @atomicrmw_xor_i16_seq_cst(i1
 ; RV32IA-NEXT:    amoxor.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    srl a0, a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i16_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_xor_2
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -3853,6 +5455,49 @@ define i16 @atomicrmw_max_i16_monotonic(
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i16_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai s1, a1, 48
+; RV64I-NEXT:    addi s3, sp, 6
+; RV64I-NEXT:  .LBB90_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    blt s1, a1, .LBB90_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB90_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB90_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB90_1 Depth=1
+; RV64I-NEXT:    sh a0, 6(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 6(sp)
+; RV64I-NEXT:    beqz a1, .LBB90_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i16* %a, i16 %b monotonic
   ret i16 %1
 }
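
Without the A extension there is no native atomic min/max, so the i16 max
above expands into a compare-exchange retry loop: sign-extend the loaded
halfword (the slli/srai by 48 pair), keep the larger of the old value and
b, and retry through __atomic_compare_exchange_2 until the CAS succeeds.
An illustrative C equivalent of the .LBB90_* loop, assuming the GCC/Clang
__atomic builtins:

    #include <stdint.h>

    static int16_t atomicrmw_max_i16(int16_t *a, int16_t b) {
      int16_t old = *a;                /* lhu, then slli/srai to sign extend */
      int16_t desired;
      do {
        desired = old > b ? old : b;   /* blt s1, a1 / mv a2, s2 */
      } while (!__atomic_compare_exchange_n(a, &old, desired, /*weak=*/0,
                                            __ATOMIC_RELAXED,
                                            __ATOMIC_RELAXED));
      return old;                      /* lh a0, 6(sp) */
    }

The min/umin/umax variants below differ only in the branch condition (bge,
bltu, bgeu) and, for the acquire through seq_cst orderings, in the
success/failure order arguments passed in a3/a4.
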
@@ -3934,6 +5579,52 @@ define i16 @atomicrmw_max_i16_acquire(i1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i16_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai s5, a1, 48
+; RV64I-NEXT:    addi s3, sp, 14
+; RV64I-NEXT:    addi s1, zero, 2
+; RV64I-NEXT:  .LBB91_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    blt s5, a1, .LBB91_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB91_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB91_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB91_1 Depth=1
+; RV64I-NEXT:    sh a0, 14(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 14(sp)
+; RV64I-NEXT:    beqz a1, .LBB91_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -4015,6 +5706,52 @@ define i16 @atomicrmw_max_i16_release(i1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i16_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s5, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai s1, a1, 48
+; RV64I-NEXT:    addi s3, sp, 14
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB92_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    blt s1, a1, .LBB92_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB92_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB92_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB92_1 Depth=1
+; RV64I-NEXT:    sh a0, 14(sp)
+; RV64I-NEXT:    mv a0, s5
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 14(sp)
+; RV64I-NEXT:    beqz a1, .LBB92_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i16* %a, i16 %b release
   ret i16 %1
 }
@@ -4099,6 +5836,55 @@ define i16 @atomicrmw_max_i16_acq_rel(i1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i16_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s6, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai s1, a1, 48
+; RV64I-NEXT:    addi s3, sp, 6
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB93_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    blt s1, a1, .LBB93_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB93_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB93_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB93_1 Depth=1
+; RV64I-NEXT:    sh a0, 6(sp)
+; RV64I-NEXT:    mv a0, s6
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 6(sp)
+; RV64I-NEXT:    beqz a1, .LBB93_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -4180,6 +5966,52 @@ define i16 @atomicrmw_max_i16_seq_cst(i1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i16_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai s5, a1, 48
+; RV64I-NEXT:    addi s3, sp, 14
+; RV64I-NEXT:    addi s1, zero, 5
+; RV64I-NEXT:  .LBB94_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    blt s5, a1, .LBB94_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB94_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB94_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB94_1 Depth=1
+; RV64I-NEXT:    sh a0, 14(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 14(sp)
+; RV64I-NEXT:    beqz a1, .LBB94_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -4258,6 +6090,49 @@ define i16 @atomicrmw_min_i16_monotonic(
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i16_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai s1, a1, 48
+; RV64I-NEXT:    addi s3, sp, 6
+; RV64I-NEXT:  .LBB95_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge s1, a1, .LBB95_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB95_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB95_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB95_1 Depth=1
+; RV64I-NEXT:    sh a0, 6(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 6(sp)
+; RV64I-NEXT:    beqz a1, .LBB95_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -4339,6 +6214,52 @@ define i16 @atomicrmw_min_i16_acquire(i1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i16_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai s5, a1, 48
+; RV64I-NEXT:    addi s3, sp, 14
+; RV64I-NEXT:    addi s1, zero, 2
+; RV64I-NEXT:  .LBB96_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge s5, a1, .LBB96_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB96_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB96_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB96_1 Depth=1
+; RV64I-NEXT:    sh a0, 14(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 14(sp)
+; RV64I-NEXT:    beqz a1, .LBB96_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -4420,6 +6341,52 @@ define i16 @atomicrmw_min_i16_release(i1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i16_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s5, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai s1, a1, 48
+; RV64I-NEXT:    addi s3, sp, 14
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB97_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge s1, a1, .LBB97_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB97_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB97_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB97_1 Depth=1
+; RV64I-NEXT:    sh a0, 14(sp)
+; RV64I-NEXT:    mv a0, s5
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 14(sp)
+; RV64I-NEXT:    beqz a1, .LBB97_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i16* %a, i16 %b release
   ret i16 %1
 }
@@ -4504,6 +6471,55 @@ define i16 @atomicrmw_min_i16_acq_rel(i1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i16_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s6, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai s1, a1, 48
+; RV64I-NEXT:    addi s3, sp, 6
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB98_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge s1, a1, .LBB98_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB98_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB98_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB98_1 Depth=1
+; RV64I-NEXT:    sh a0, 6(sp)
+; RV64I-NEXT:    mv a0, s6
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 6(sp)
+; RV64I-NEXT:    beqz a1, .LBB98_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -4585,6 +6601,52 @@ define i16 @atomicrmw_min_i16_seq_cst(i1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i16_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai s5, a1, 48
+; RV64I-NEXT:    addi s3, sp, 14
+; RV64I-NEXT:    addi s1, zero, 5
+; RV64I-NEXT:  .LBB99_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    slli a1, a0, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge s5, a1, .LBB99_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB99_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB99_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB99_1 Depth=1
+; RV64I-NEXT:    sh a0, 14(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 14(sp)
+; RV64I-NEXT:    beqz a1, .LBB99_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -4660,6 +6722,51 @@ define i16 @atomicrmw_umax_i16_monotonic
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i16_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw s1, a1, -1
+; RV64I-NEXT:    and s5, s2, s1
+; RV64I-NEXT:    addi s3, sp, 14
+; RV64I-NEXT:  .LBB100_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    and a1, a0, s1
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu s5, a1, .LBB100_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB100_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB100_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB100_1 Depth=1
+; RV64I-NEXT:    sh a0, 14(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 14(sp)
+; RV64I-NEXT:    beqz a1, .LBB100_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i16* %a, i16 %b monotonic
   ret i16 %1
 }
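
For the unsigned comparisons the loop zero-extends instead, so the
slli/srai pair is replaced by an and with 0xffff; the lui/addiw sequence
above is simply how that mask is materialized on RV64. A one-line sketch
of the arithmetic:

    #include <stdint.h>
    /* "lui a1, 16" loads 16 << 12 = 0x10000; "addiw s1, a1, -1" then
       yields the halfword mask applied before the bltu compare. */
    static const uint32_t kHalfwordMask = (16u << 12) - 1; /* 0xffff */
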
@@ -4738,6 +6845,54 @@ define i16 @atomicrmw_umax_i16_acquire(i
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i16_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw s5, a1, -1
+; RV64I-NEXT:    and s6, s2, s5
+; RV64I-NEXT:    addi s3, sp, 6
+; RV64I-NEXT:    addi s1, zero, 2
+; RV64I-NEXT:  .LBB101_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    and a1, a0, s5
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu s6, a1, .LBB101_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB101_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB101_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB101_1 Depth=1
+; RV64I-NEXT:    sh a0, 6(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 6(sp)
+; RV64I-NEXT:    beqz a1, .LBB101_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -4816,6 +6971,54 @@ define i16 @atomicrmw_umax_i16_release(i
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i16_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s5, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw s1, a1, -1
+; RV64I-NEXT:    and s6, s2, s1
+; RV64I-NEXT:    addi s3, sp, 6
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB102_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    and a1, a0, s1
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu s6, a1, .LBB102_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB102_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB102_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB102_1 Depth=1
+; RV64I-NEXT:    sh a0, 6(sp)
+; RV64I-NEXT:    mv a0, s5
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 6(sp)
+; RV64I-NEXT:    beqz a1, .LBB102_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i16* %a, i16 %b release
   ret i16 %1
 }
@@ -4897,6 +7100,57 @@ define i16 @atomicrmw_umax_i16_acq_rel(i
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i16_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -80
+; RV64I-NEXT:    sd ra, 72(sp)
+; RV64I-NEXT:    sd s1, 64(sp)
+; RV64I-NEXT:    sd s2, 56(sp)
+; RV64I-NEXT:    sd s3, 48(sp)
+; RV64I-NEXT:    sd s4, 40(sp)
+; RV64I-NEXT:    sd s5, 32(sp)
+; RV64I-NEXT:    sd s6, 24(sp)
+; RV64I-NEXT:    sd s7, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s6, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw s1, a1, -1
+; RV64I-NEXT:    and s7, s2, s1
+; RV64I-NEXT:    addi s3, sp, 14
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB103_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    and a1, a0, s1
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu s7, a1, .LBB103_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB103_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB103_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB103_1 Depth=1
+; RV64I-NEXT:    sh a0, 14(sp)
+; RV64I-NEXT:    mv a0, s6
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 14(sp)
+; RV64I-NEXT:    beqz a1, .LBB103_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s7, 16(sp)
+; RV64I-NEXT:    ld s6, 24(sp)
+; RV64I-NEXT:    ld s5, 32(sp)
+; RV64I-NEXT:    ld s4, 40(sp)
+; RV64I-NEXT:    ld s3, 48(sp)
+; RV64I-NEXT:    ld s2, 56(sp)
+; RV64I-NEXT:    ld s1, 64(sp)
+; RV64I-NEXT:    ld ra, 72(sp)
+; RV64I-NEXT:    addi sp, sp, 80
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -4975,6 +7229,54 @@ define i16 @atomicrmw_umax_i16_seq_cst(i
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i16_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw s5, a1, -1
+; RV64I-NEXT:    and s6, s2, s5
+; RV64I-NEXT:    addi s3, sp, 6
+; RV64I-NEXT:    addi s1, zero, 5
+; RV64I-NEXT:  .LBB104_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    and a1, a0, s5
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu s6, a1, .LBB104_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB104_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB104_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB104_1 Depth=1
+; RV64I-NEXT:    sh a0, 6(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 6(sp)
+; RV64I-NEXT:    beqz a1, .LBB104_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -5050,6 +7352,51 @@ define i16 @atomicrmw_umin_i16_monotonic
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i16_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw s1, a1, -1
+; RV64I-NEXT:    and s5, s2, s1
+; RV64I-NEXT:    addi s3, sp, 14
+; RV64I-NEXT:  .LBB105_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    and a1, a0, s1
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu s5, a1, .LBB105_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB105_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB105_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB105_1 Depth=1
+; RV64I-NEXT:    sh a0, 14(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 14(sp)
+; RV64I-NEXT:    beqz a1, .LBB105_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -5128,6 +7475,54 @@ define i16 @atomicrmw_umin_i16_acquire(i
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i16_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw s5, a1, -1
+; RV64I-NEXT:    and s6, s2, s5
+; RV64I-NEXT:    addi s3, sp, 6
+; RV64I-NEXT:    addi s1, zero, 2
+; RV64I-NEXT:  .LBB106_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    and a1, a0, s5
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu s6, a1, .LBB106_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB106_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB106_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB106_1 Depth=1
+; RV64I-NEXT:    sh a0, 6(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 6(sp)
+; RV64I-NEXT:    beqz a1, .LBB106_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -5206,6 +7601,54 @@ define i16 @atomicrmw_umin_i16_release(i
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i16_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s5, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw s1, a1, -1
+; RV64I-NEXT:    and s6, s2, s1
+; RV64I-NEXT:    addi s3, sp, 6
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB107_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    and a1, a0, s1
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu s6, a1, .LBB107_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB107_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB107_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB107_1 Depth=1
+; RV64I-NEXT:    sh a0, 6(sp)
+; RV64I-NEXT:    mv a0, s5
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 6(sp)
+; RV64I-NEXT:    beqz a1, .LBB107_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i16* %a, i16 %b release
   ret i16 %1
 }
@@ -5287,6 +7730,57 @@ define i16 @atomicrmw_umin_i16_acq_rel(i
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i16_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -80
+; RV64I-NEXT:    sd ra, 72(sp)
+; RV64I-NEXT:    sd s1, 64(sp)
+; RV64I-NEXT:    sd s2, 56(sp)
+; RV64I-NEXT:    sd s3, 48(sp)
+; RV64I-NEXT:    sd s4, 40(sp)
+; RV64I-NEXT:    sd s5, 32(sp)
+; RV64I-NEXT:    sd s6, 24(sp)
+; RV64I-NEXT:    sd s7, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s6, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw s1, a1, -1
+; RV64I-NEXT:    and s7, s2, s1
+; RV64I-NEXT:    addi s3, sp, 14
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB108_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    and a1, a0, s1
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu s7, a1, .LBB108_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB108_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB108_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB108_1 Depth=1
+; RV64I-NEXT:    sh a0, 14(sp)
+; RV64I-NEXT:    mv a0, s6
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 14(sp)
+; RV64I-NEXT:    beqz a1, .LBB108_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s7, 16(sp)
+; RV64I-NEXT:    ld s6, 24(sp)
+; RV64I-NEXT:    ld s5, 32(sp)
+; RV64I-NEXT:    ld s4, 40(sp)
+; RV64I-NEXT:    ld s3, 48(sp)
+; RV64I-NEXT:    ld s2, 56(sp)
+; RV64I-NEXT:    ld s1, 64(sp)
+; RV64I-NEXT:    ld ra, 72(sp)
+; RV64I-NEXT:    addi sp, sp, 80
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -5365,6 +7859,54 @@ define i16 @atomicrmw_umin_i16_seq_cst(i
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a3
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i16_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lhu a0, 0(a0)
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw s5, a1, -1
+; RV64I-NEXT:    and s6, s2, s5
+; RV64I-NEXT:    addi s3, sp, 6
+; RV64I-NEXT:    addi s1, zero, 5
+; RV64I-NEXT:  .LBB109_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    and a1, a0, s5
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu s6, a1, .LBB109_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB109_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB109_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB109_1 Depth=1
+; RV64I-NEXT:    sh a0, 6(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_2
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lh a0, 6(sp)
+; RV64I-NEXT:    beqz a1, .LBB109_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -5384,6 +7926,16 @@ define i32 @atomicrmw_xchg_i32_monotonic
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoswap.w a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i32_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_exchange_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i32* %a, i32 %b monotonic
   ret i32 %1
 }
@@ -5403,6 +7955,16 @@ define i32 @atomicrmw_xchg_i32_acquire(i
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoswap.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i32_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_exchange_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -5422,6 +7984,16 @@ define i32 @atomicrmw_xchg_i32_release(i
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoswap.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i32_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_exchange_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i32* %a, i32 %b release
   ret i32 %1
 }
@@ -5441,6 +8013,16 @@ define i32 @atomicrmw_xchg_i32_acq_rel(i
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoswap.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i32_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_exchange_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -5460,6 +8042,16 @@ define i32 @atomicrmw_xchg_i32_seq_cst(i
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoswap.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i32_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_exchange_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -5479,6 +8071,16 @@ define i32 @atomicrmw_add_i32_monotonic(
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoadd.w a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i32_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_add_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i32* %a, i32 %b monotonic
   ret i32 %1
 }
@@ -5498,6 +8100,16 @@ define i32 @atomicrmw_add_i32_acquire(i3
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoadd.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i32_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_add_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -5517,6 +8129,16 @@ define i32 @atomicrmw_add_i32_release(i3
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoadd.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i32_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_add_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i32* %a, i32 %b release
   ret i32 %1
 }
@@ -5536,6 +8158,16 @@ define i32 @atomicrmw_add_i32_acq_rel(i3
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoadd.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i32_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_add_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -5555,6 +8187,16 @@ define i32 @atomicrmw_add_i32_seq_cst(i3
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoadd.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i32_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_add_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -5575,6 +8217,16 @@ define i32 @atomicrmw_sub_i32_monotonic(
 ; RV32IA-NEXT:    neg a1, a1
 ; RV32IA-NEXT:    amoadd.w a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i32_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_sub_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i32* %a, i32 %b monotonic
   ret i32 %1
 }
@@ -5595,6 +8247,16 @@ define i32 @atomicrmw_sub_i32_acquire(i3
 ; RV32IA-NEXT:    neg a1, a1
 ; RV32IA-NEXT:    amoadd.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i32_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_sub_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -5615,6 +8277,16 @@ define i32 @atomicrmw_sub_i32_release(i3
 ; RV32IA-NEXT:    neg a1, a1
 ; RV32IA-NEXT:    amoadd.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i32_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_sub_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i32* %a, i32 %b release
   ret i32 %1
 }
@@ -5635,6 +8307,16 @@ define i32 @atomicrmw_sub_i32_acq_rel(i3
 ; RV32IA-NEXT:    neg a1, a1
 ; RV32IA-NEXT:    amoadd.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i32_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_sub_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -5655,6 +8337,16 @@ define i32 @atomicrmw_sub_i32_seq_cst(i3
 ; RV32IA-NEXT:    neg a1, a1
 ; RV32IA-NEXT:    amoadd.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i32_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_sub_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i32* %a, i32 %b seq_cst
   ret i32 %1
 }
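
One detail worth calling out in the RV32IA lines above: there is no
amosub.w, so atomicrmw sub is emitted as a neg followed by amoadd.w. In C
builtin terms (a sketch, not how the backend itself is written):

    #include <stdint.h>

    /* fetch_sub(p, b) lowered as fetch_add(p, -b); the "neg a1, a1" before
       each amoadd.w above performs exactly this negation. */
    static inline uint32_t fetch_sub_via_add(uint32_t *p, uint32_t b) {
      return __atomic_fetch_add(p, 0u - b, __ATOMIC_RELAXED);
    }
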
@@ -5674,6 +8366,16 @@ define i32 @atomicrmw_and_i32_monotonic(
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoand.w a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i32_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_and_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i32* %a, i32 %b monotonic
   ret i32 %1
 }
@@ -5693,6 +8395,16 @@ define i32 @atomicrmw_and_i32_acquire(i3
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoand.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i32_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_and_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -5712,6 +8424,16 @@ define i32 @atomicrmw_and_i32_release(i3
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoand.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i32_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_and_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i32* %a, i32 %b release
   ret i32 %1
 }
@@ -5731,6 +8453,16 @@ define i32 @atomicrmw_and_i32_acq_rel(i3
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoand.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i32_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_and_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -5750,6 +8482,16 @@ define i32 @atomicrmw_and_i32_seq_cst(i3
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoand.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i32_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_and_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -5776,6 +8518,16 @@ define i32 @atomicrmw_nand_i32_monotonic
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    mv a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i32_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_nand_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i32* %a, i32 %b monotonic
   ret i32 %1
 }
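
nand is the one RMW operation with no AMO encoding at all, so even the
RV32IA output keeps a load-reserved/store-conditional retry loop (its tail,
# %bb.2, is visible above); across orderings only the .aq/.rl suffixes on
the lr.w/sc.w pair change. The C builtin view of the operation, as a
sketch:

    #include <stdint.h>

    /* Atomically stores ~(*p & b) and returns the old value. */
    static inline uint32_t fetch_nand(uint32_t *p, uint32_t b) {
      return __atomic_fetch_nand(p, b, __ATOMIC_RELAXED);
    }
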
@@ -5802,6 +8554,16 @@ define i32 @atomicrmw_nand_i32_acquire(i
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    mv a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i32_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_nand_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -5828,6 +8590,16 @@ define i32 @atomicrmw_nand_i32_release(i
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    mv a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i32_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_nand_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i32* %a, i32 %b release
   ret i32 %1
 }
@@ -5854,6 +8626,16 @@ define i32 @atomicrmw_nand_i32_acq_rel(i
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    mv a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i32_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_nand_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -5880,6 +8662,16 @@ define i32 @atomicrmw_nand_i32_seq_cst(i
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    mv a0, a2
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i32_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_nand_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -5899,6 +8691,16 @@ define i32 @atomicrmw_or_i32_monotonic(i
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoor.w a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i32_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_or_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i32* %a, i32 %b monotonic
   ret i32 %1
 }
@@ -5918,6 +8720,16 @@ define i32 @atomicrmw_or_i32_acquire(i32
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoor.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i32_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_or_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -5937,6 +8749,16 @@ define i32 @atomicrmw_or_i32_release(i32
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoor.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i32_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_or_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i32* %a, i32 %b release
   ret i32 %1
 }
@@ -5956,6 +8778,16 @@ define i32 @atomicrmw_or_i32_acq_rel(i32
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoor.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i32_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_or_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -5975,6 +8807,16 @@ define i32 @atomicrmw_or_i32_seq_cst(i32
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoor.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i32_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_or_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -5994,6 +8836,16 @@ define i32 @atomicrmw_xor_i32_monotonic(
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoxor.w a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i32_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_xor_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i32* %a, i32 %b monotonic
   ret i32 %1
 }
@@ -6013,6 +8865,16 @@ define i32 @atomicrmw_xor_i32_acquire(i3
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoxor.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i32_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_xor_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -6032,6 +8894,16 @@ define i32 @atomicrmw_xor_i32_release(i3
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoxor.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i32_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_xor_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i32* %a, i32 %b release
   ret i32 %1
 }
@@ -6051,6 +8923,16 @@ define i32 @atomicrmw_xor_i32_acq_rel(i3
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoxor.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i32_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_xor_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -6070,6 +8952,16 @@ define i32 @atomicrmw_xor_i32_seq_cst(i3
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amoxor.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i32_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_xor_4
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -6115,6 +9007,47 @@ define i32 @atomicrmw_max_i32_monotonic(
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amomax.w a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i32_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s1, a1
+; RV64I-NEXT:    addi s3, sp, 4
+; RV64I-NEXT:  .LBB145_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    blt s1, a1, .LBB145_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB145_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB145_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB145_1 Depth=1
+; RV64I-NEXT:    sw a0, 4(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 4(sp)
+; RV64I-NEXT:    beqz a1, .LBB145_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i32* %a, i32 %b monotonic
   ret i32 %1
 }
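
Without the A extension there is no AMO for max, so atomicrmw max is expanded into a compare-exchange loop: the old value is loaded with lwu, re-sign-extended with sext.w for the signed compare, and the CAS retries through __atomic_compare_exchange_4 until it succeeds. A C sketch of the loop shape, with the libcall signature implied by the a0-a4 setup above (pointer, pointer to expected, desired value, success order, failure order):

    #include <stdbool.h>
    #include <stdint.h>

    bool __atomic_compare_exchange_4(volatile void *ptr, uint32_t *expected,
                                     uint32_t desired, int success, int failure);

    int32_t rmw_max_i32_monotonic(int32_t *p, int32_t b) {
        uint32_t old = *(volatile uint32_t *)p;        /* lwu a0, 0(a0) */
        uint32_t desired;
        do {
            /* sext.w + blt: keep the old value when it is greater than b */
            desired = ((int32_t)old > b) ? old : (uint32_t)b;
            /* on failure the call rewrites old with the current memory value */
        } while (!__atomic_compare_exchange_4(p, &old, desired, 0, 0));
        return (int32_t)old;                           /* value before the op */
    }
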
@@ -6163,6 +9096,50 @@ define i32 @atomicrmw_max_i32_acquire(i3
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amomax.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i32_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s5, a1
+; RV64I-NEXT:    addi s3, sp, 12
+; RV64I-NEXT:    addi s1, zero, 2
+; RV64I-NEXT:  .LBB146_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    blt s5, a1, .LBB146_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB146_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB146_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB146_1 Depth=1
+; RV64I-NEXT:    sw a0, 12(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 12(sp)
+; RV64I-NEXT:    beqz a1, .LBB146_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -6211,6 +9188,50 @@ define i32 @atomicrmw_max_i32_release(i3
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amomax.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i32_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s5, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s1, a1
+; RV64I-NEXT:    addi s3, sp, 12
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB147_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    blt s1, a1, .LBB147_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB147_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB147_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB147_1 Depth=1
+; RV64I-NEXT:    sw a0, 12(sp)
+; RV64I-NEXT:    mv a0, s5
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 12(sp)
+; RV64I-NEXT:    beqz a1, .LBB147_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i32* %a, i32 %b release
   ret i32 %1
 }
@@ -6262,6 +9283,53 @@ define i32 @atomicrmw_max_i32_acq_rel(i3
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amomax.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i32_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s6, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s1, a1
+; RV64I-NEXT:    addi s3, sp, 4
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB148_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    blt s1, a1, .LBB148_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB148_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB148_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB148_1 Depth=1
+; RV64I-NEXT:    sw a0, 4(sp)
+; RV64I-NEXT:    mv a0, s6
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 4(sp)
+; RV64I-NEXT:    beqz a1, .LBB148_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -6310,6 +9378,50 @@ define i32 @atomicrmw_max_i32_seq_cst(i3
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amomax.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i32_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s5, a1
+; RV64I-NEXT:    addi s3, sp, 12
+; RV64I-NEXT:    addi s1, zero, 5
+; RV64I-NEXT:  .LBB149_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    blt s5, a1, .LBB149_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB149_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB149_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB149_1 Depth=1
+; RV64I-NEXT:    sw a0, 12(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 12(sp)
+; RV64I-NEXT:    beqz a1, .LBB149_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -6355,6 +9467,47 @@ define i32 @atomicrmw_min_i32_monotonic(
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amomin.w a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i32_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s1, a1
+; RV64I-NEXT:    addi s3, sp, 4
+; RV64I-NEXT:  .LBB150_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge s1, a1, .LBB150_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB150_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB150_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB150_1 Depth=1
+; RV64I-NEXT:    sw a0, 4(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 4(sp)
+; RV64I-NEXT:    beqz a1, .LBB150_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i32* %a, i32 %b monotonic
   ret i32 %1
 }
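
The min loop above and the umax/umin loops below share the shape of the max expansion; the only difference is the compare that picks the replacement value, visible as blt, bge, bltu and bgeu in the branches (sext.w followed by an unsigned 64-bit compare yields the correct 32-bit unsigned result, since both operands carry the same sign-extension). Illustrative selection helpers, one per RMW flavor:

    #include <stdint.h>

    /* Only this compare differs across the four signed/unsigned RMW loops. */
    static uint32_t sel_max (uint32_t old, int32_t  b)
    { return ((int32_t)old >  b) ? old : (uint32_t)b; }   /* blt  */
    static uint32_t sel_min (uint32_t old, int32_t  b)
    { return ((int32_t)old <= b) ? old : (uint32_t)b; }   /* bge  */
    static uint32_t sel_umax(uint32_t old, uint32_t b)
    { return (old >  b) ? old : b; }                      /* bltu */
    static uint32_t sel_umin(uint32_t old, uint32_t b)
    { return (old <= b) ? old : b; }                      /* bgeu */
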
@@ -6403,6 +9556,50 @@ define i32 @atomicrmw_min_i32_acquire(i3
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amomin.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i32_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s5, a1
+; RV64I-NEXT:    addi s3, sp, 12
+; RV64I-NEXT:    addi s1, zero, 2
+; RV64I-NEXT:  .LBB151_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge s5, a1, .LBB151_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB151_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB151_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB151_1 Depth=1
+; RV64I-NEXT:    sw a0, 12(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 12(sp)
+; RV64I-NEXT:    beqz a1, .LBB151_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -6451,6 +9648,50 @@ define i32 @atomicrmw_min_i32_release(i3
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amomin.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i32_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s5, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s1, a1
+; RV64I-NEXT:    addi s3, sp, 12
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB152_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge s1, a1, .LBB152_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB152_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB152_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB152_1 Depth=1
+; RV64I-NEXT:    sw a0, 12(sp)
+; RV64I-NEXT:    mv a0, s5
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 12(sp)
+; RV64I-NEXT:    beqz a1, .LBB152_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i32* %a, i32 %b release
   ret i32 %1
 }
@@ -6502,6 +9743,53 @@ define i32 @atomicrmw_min_i32_acq_rel(i3
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amomin.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i32_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s6, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s1, a1
+; RV64I-NEXT:    addi s3, sp, 4
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB153_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge s1, a1, .LBB153_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB153_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB153_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB153_1 Depth=1
+; RV64I-NEXT:    sw a0, 4(sp)
+; RV64I-NEXT:    mv a0, s6
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 4(sp)
+; RV64I-NEXT:    beqz a1, .LBB153_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -6550,6 +9838,50 @@ define i32 @atomicrmw_min_i32_seq_cst(i3
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amomin.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i32_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s5, a1
+; RV64I-NEXT:    addi s3, sp, 12
+; RV64I-NEXT:    addi s1, zero, 5
+; RV64I-NEXT:  .LBB154_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge s5, a1, .LBB154_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB154_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB154_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB154_1 Depth=1
+; RV64I-NEXT:    sw a0, 12(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 12(sp)
+; RV64I-NEXT:    beqz a1, .LBB154_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -6595,6 +9927,47 @@ define i32 @atomicrmw_umax_i32_monotonic
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amomaxu.w a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i32_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s1, a1
+; RV64I-NEXT:    addi s3, sp, 4
+; RV64I-NEXT:  .LBB155_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu s1, a1, .LBB155_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB155_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB155_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB155_1 Depth=1
+; RV64I-NEXT:    sw a0, 4(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 4(sp)
+; RV64I-NEXT:    beqz a1, .LBB155_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i32* %a, i32 %b monotonic
   ret i32 %1
 }
@@ -6643,6 +10016,50 @@ define i32 @atomicrmw_umax_i32_acquire(i
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amomaxu.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i32_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s5, a1
+; RV64I-NEXT:    addi s3, sp, 12
+; RV64I-NEXT:    addi s1, zero, 2
+; RV64I-NEXT:  .LBB156_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu s5, a1, .LBB156_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB156_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB156_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB156_1 Depth=1
+; RV64I-NEXT:    sw a0, 12(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 12(sp)
+; RV64I-NEXT:    beqz a1, .LBB156_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -6691,6 +10108,50 @@ define i32 @atomicrmw_umax_i32_release(i
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amomaxu.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i32_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s5, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s1, a1
+; RV64I-NEXT:    addi s3, sp, 12
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB157_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu s1, a1, .LBB157_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB157_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB157_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB157_1 Depth=1
+; RV64I-NEXT:    sw a0, 12(sp)
+; RV64I-NEXT:    mv a0, s5
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 12(sp)
+; RV64I-NEXT:    beqz a1, .LBB157_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i32* %a, i32 %b release
   ret i32 %1
 }
@@ -6742,6 +10203,53 @@ define i32 @atomicrmw_umax_i32_acq_rel(i
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amomaxu.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i32_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s6, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s1, a1
+; RV64I-NEXT:    addi s3, sp, 4
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB158_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu s1, a1, .LBB158_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB158_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB158_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB158_1 Depth=1
+; RV64I-NEXT:    sw a0, 4(sp)
+; RV64I-NEXT:    mv a0, s6
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 4(sp)
+; RV64I-NEXT:    beqz a1, .LBB158_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -6790,6 +10298,50 @@ define i32 @atomicrmw_umax_i32_seq_cst(i
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amomaxu.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i32_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s5, a1
+; RV64I-NEXT:    addi s3, sp, 12
+; RV64I-NEXT:    addi s1, zero, 5
+; RV64I-NEXT:  .LBB159_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu s5, a1, .LBB159_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB159_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB159_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB159_1 Depth=1
+; RV64I-NEXT:    sw a0, 12(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 12(sp)
+; RV64I-NEXT:    beqz a1, .LBB159_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -6835,6 +10387,47 @@ define i32 @atomicrmw_umin_i32_monotonic
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amominu.w a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i32_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s1, a1
+; RV64I-NEXT:    addi s3, sp, 4
+; RV64I-NEXT:  .LBB160_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu s1, a1, .LBB160_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB160_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB160_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB160_1 Depth=1
+; RV64I-NEXT:    sw a0, 4(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 4(sp)
+; RV64I-NEXT:    beqz a1, .LBB160_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i32* %a, i32 %b monotonic
   ret i32 %1
 }
@@ -6883,6 +10476,50 @@ define i32 @atomicrmw_umin_i32_acquire(i
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amominu.w.aq a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i32_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s5, a1
+; RV64I-NEXT:    addi s3, sp, 12
+; RV64I-NEXT:    addi s1, zero, 2
+; RV64I-NEXT:  .LBB161_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu s5, a1, .LBB161_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB161_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB161_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB161_1 Depth=1
+; RV64I-NEXT:    sw a0, 12(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 12(sp)
+; RV64I-NEXT:    beqz a1, .LBB161_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -6931,6 +10568,50 @@ define i32 @atomicrmw_umin_i32_release(i
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amominu.w.rl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i32_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s5, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s1, a1
+; RV64I-NEXT:    addi s3, sp, 12
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB162_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu s1, a1, .LBB162_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB162_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB162_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB162_1 Depth=1
+; RV64I-NEXT:    sw a0, 12(sp)
+; RV64I-NEXT:    mv a0, s5
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 12(sp)
+; RV64I-NEXT:    beqz a1, .LBB162_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i32* %a, i32 %b release
   ret i32 %1
 }
@@ -6982,6 +10663,53 @@ define i32 @atomicrmw_umin_i32_acq_rel(i
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amominu.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i32_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    sd s6, 8(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s6, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s1, a1
+; RV64I-NEXT:    addi s3, sp, 4
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB163_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu s1, a1, .LBB163_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB163_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB163_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB163_1 Depth=1
+; RV64I-NEXT:    sw a0, 4(sp)
+; RV64I-NEXT:    mv a0, s6
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 4(sp)
+; RV64I-NEXT:    beqz a1, .LBB163_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s6, 8(sp)
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -7030,6 +10758,50 @@ define i32 @atomicrmw_umin_i32_seq_cst(i
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    amominu.w.aqrl a0, a1, (a0)
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i32_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s4, a0
+; RV64I-NEXT:    lwu a0, 0(a0)
+; RV64I-NEXT:    sext.w s5, a1
+; RV64I-NEXT:    addi s3, sp, 12
+; RV64I-NEXT:    addi s1, zero, 5
+; RV64I-NEXT:  .LBB164_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu s5, a1, .LBB164_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB164_1 Depth=1
+; RV64I-NEXT:    mv a2, s2
+; RV64I-NEXT:  .LBB164_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB164_1 Depth=1
+; RV64I-NEXT:    sw a0, 12(sp)
+; RV64I-NEXT:    mv a0, s4
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s1
+; RV64I-NEXT:    mv a4, s1
+; RV64I-NEXT:    call __atomic_compare_exchange_4
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    lw a0, 12(sp)
+; RV64I-NEXT:    beqz a1, .LBB164_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -7054,6 +10826,16 @@ define i64 @atomicrmw_xchg_i64_monotonic
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i64_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_exchange_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i64* %a, i64 %b monotonic
   ret i64 %1
 }
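
The i64 operations follow the same pattern as their i32 counterparts, just through the sized-8 libcalls; the ordering-constant convention in a2 is unchanged. A short C sketch (real libatomic entry points, illustrative wrapper):

    #include <stdint.h>

    uint64_t __atomic_exchange_8 (volatile void *ptr, uint64_t val, int order);
    uint64_t __atomic_fetch_add_8(volatile void *ptr, uint64_t val, int order);

    uint64_t xchg_i64_acquire(uint64_t *p, uint64_t v) {
        return __atomic_exchange_8(p, v, 2 /* __ATOMIC_ACQUIRE */);
    }
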
@@ -7078,6 +10860,16 @@ define i64 @atomicrmw_xchg_i64_acquire(i
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i64_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_exchange_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -7102,6 +10894,16 @@ define i64 @atomicrmw_xchg_i64_release(i
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i64_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_exchange_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i64* %a, i64 %b release
   ret i64 %1
 }
@@ -7126,6 +10928,16 @@ define i64 @atomicrmw_xchg_i64_acq_rel(i
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i64_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_exchange_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -7150,6 +10962,16 @@ define i64 @atomicrmw_xchg_i64_seq_cst(i
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i64_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_exchange_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xchg i64* %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -7174,6 +10996,16 @@ define i64 @atomicrmw_add_i64_monotonic(
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i64_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_add_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -7198,6 +11030,16 @@ define i64 @atomicrmw_add_i64_acquire(i6
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i64_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_add_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -7222,6 +11064,16 @@ define i64 @atomicrmw_add_i64_release(i6
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i64_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_add_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i64* %a, i64 %b release
   ret i64 %1
 }
@@ -7246,6 +11098,16 @@ define i64 @atomicrmw_add_i64_acq_rel(i6
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i64_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_add_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -7270,6 +11132,16 @@ define i64 @atomicrmw_add_i64_seq_cst(i6
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_add_i64_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_add_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw add i64* %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -7294,6 +11166,16 @@ define i64 @atomicrmw_sub_i64_monotonic(
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i64_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_sub_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -7318,6 +11200,16 @@ define i64 @atomicrmw_sub_i64_acquire(i6
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i64_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_sub_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -7342,6 +11234,16 @@ define i64 @atomicrmw_sub_i64_release(i6
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i64_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_sub_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i64* %a, i64 %b release
   ret i64 %1
 }
@@ -7366,6 +11268,16 @@ define i64 @atomicrmw_sub_i64_acq_rel(i6
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i64_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_sub_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -7390,6 +11302,16 @@ define i64 @atomicrmw_sub_i64_seq_cst(i6
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_sub_i64_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_sub_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw sub i64* %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -7414,6 +11336,16 @@ define i64 @atomicrmw_and_i64_monotonic(
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i64_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_and_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -7438,6 +11370,16 @@ define i64 @atomicrmw_and_i64_acquire(i6
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i64_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_and_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -7462,6 +11404,16 @@ define i64 @atomicrmw_and_i64_release(i6
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i64_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_and_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i64* %a, i64 %b release
   ret i64 %1
 }
@@ -7486,6 +11438,16 @@ define i64 @atomicrmw_and_i64_acq_rel(i6
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i64_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_and_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -7510,6 +11472,16 @@ define i64 @atomicrmw_and_i64_seq_cst(i6
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_and_i64_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_and_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw and i64* %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -7534,6 +11506,16 @@ define i64 @atomicrmw_nand_i64_monotonic
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i64_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_nand_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -7558,6 +11540,16 @@ define i64 @atomicrmw_nand_i64_acquire(i
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i64_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_nand_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -7582,6 +11574,16 @@ define i64 @atomicrmw_nand_i64_release(i
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i64_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_nand_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i64* %a, i64 %b release
   ret i64 %1
 }
@@ -7606,6 +11608,16 @@ define i64 @atomicrmw_nand_i64_acq_rel(i
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i64_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_nand_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -7630,6 +11642,16 @@ define i64 @atomicrmw_nand_i64_seq_cst(i
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_nand_i64_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_nand_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw nand i64* %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -7654,6 +11676,16 @@ define i64 @atomicrmw_or_i64_monotonic(i
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i64_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_or_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -7678,6 +11710,16 @@ define i64 @atomicrmw_or_i64_acquire(i64
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i64_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_or_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -7702,6 +11744,16 @@ define i64 @atomicrmw_or_i64_release(i64
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i64_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_or_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i64* %a, i64 %b release
   ret i64 %1
 }
@@ -7726,6 +11778,16 @@ define i64 @atomicrmw_or_i64_acq_rel(i64
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i64_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_or_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -7750,6 +11812,16 @@ define i64 @atomicrmw_or_i64_seq_cst(i64
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_or_i64_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_or_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw or i64* %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -7774,6 +11846,16 @@ define i64 @atomicrmw_xor_i64_monotonic(
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i64_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    call __atomic_fetch_xor_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -7798,6 +11880,16 @@ define i64 @atomicrmw_xor_i64_acquire(i6
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i64_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    call __atomic_fetch_xor_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -7822,6 +11914,16 @@ define i64 @atomicrmw_xor_i64_release(i6
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i64_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    call __atomic_fetch_xor_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i64* %a, i64 %b release
   ret i64 %1
 }
@@ -7846,6 +11948,16 @@ define i64 @atomicrmw_xor_i64_acq_rel(i6
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i64_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    call __atomic_fetch_xor_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -7870,6 +11982,16 @@ define i64 @atomicrmw_xor_i64_seq_cst(i6
 ; RV32IA-NEXT:    lw ra, 12(sp)
 ; RV32IA-NEXT:    addi sp, sp, 16
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xor_i64_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    call __atomic_fetch_xor_8
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
   %1 = atomicrmw xor i64* %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -7990,6 +12112,42 @@ define i64 @atomicrmw_max_i64_monotonic(
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i64_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    addi s3, sp, 8
+; RV64I-NEXT:  .LBB200_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 8(sp)
+; RV64I-NEXT:    blt s1, a2, .LBB200_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB200_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB200_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB200_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 8(sp)
+; RV64I-NEXT:    beqz a0, .LBB200_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i64* %a, i64 %b monotonic
   ret i64 %1
 }
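
For i64 min/max the same compare-exchange expansion applies, but since the operands are already XLEN-wide the lwu/sext.w re-extension disappears: a plain ld/sd pair shuttles the expected value through the stack slot, and the result is returned with a final mv from a2. A C sketch under the same assumed five-argument libcall signature:

    #include <stdbool.h>
    #include <stdint.h>

    bool __atomic_compare_exchange_8(volatile void *ptr, uint64_t *expected,
                                     uint64_t desired, int success, int failure);

    int64_t rmw_max_i64_monotonic(int64_t *p, int64_t b) {
        int64_t old = *(volatile int64_t *)p;          /* ld a2, 0(a0) */
        int64_t desired;
        do {
            desired = (old > b) ? old : b;             /* blt s1, a2   */
        } while (!__atomic_compare_exchange_8(p, (uint64_t *)&old,
                                              (uint64_t)desired, 0, 0));
        return old;                                    /* mv a0, a2    */
    }
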
@@ -8116,6 +12274,45 @@ define i64 @atomicrmw_max_i64_acquire(i6
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i64_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    mv s3, sp
+; RV64I-NEXT:    addi s4, zero, 2
+; RV64I-NEXT:  .LBB201_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 0(sp)
+; RV64I-NEXT:    blt s1, a2, .LBB201_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB201_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB201_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB201_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s4
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 0(sp)
+; RV64I-NEXT:    beqz a0, .LBB201_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -8242,6 +12439,45 @@ define i64 @atomicrmw_max_i64_release(i6
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i64_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    mv s3, sp
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB202_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 0(sp)
+; RV64I-NEXT:    blt s1, a2, .LBB202_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB202_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB202_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB202_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 0(sp)
+; RV64I-NEXT:    beqz a0, .LBB202_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i64* %a, i64 %b release
   ret i64 %1
 }
@@ -8374,6 +12610,48 @@ define i64 @atomicrmw_max_i64_acq_rel(i6
 ; RV32IA-NEXT:    lw ra, 44(sp)
 ; RV32IA-NEXT:    addi sp, sp, 48
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i64_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    addi s3, sp, 8
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB203_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 8(sp)
+; RV64I-NEXT:    blt s1, a2, .LBB203_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB203_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB203_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB203_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 8(sp)
+; RV64I-NEXT:    beqz a0, .LBB203_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -8500,6 +12778,45 @@ define i64 @atomicrmw_max_i64_seq_cst(i6
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_max_i64_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    mv s3, sp
+; RV64I-NEXT:    addi s4, zero, 5
+; RV64I-NEXT:  .LBB204_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 0(sp)
+; RV64I-NEXT:    blt s1, a2, .LBB204_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB204_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB204_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB204_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s4
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 0(sp)
+; RV64I-NEXT:    beqz a0, .LBB204_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw max i64* %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -8622,6 +12939,42 @@ define i64 @atomicrmw_min_i64_monotonic(
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i64_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    addi s3, sp, 8
+; RV64I-NEXT:  .LBB205_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 8(sp)
+; RV64I-NEXT:    bge s1, a2, .LBB205_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB205_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB205_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB205_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 8(sp)
+; RV64I-NEXT:    beqz a0, .LBB205_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -8750,6 +13103,45 @@ define i64 @atomicrmw_min_i64_acquire(i6
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i64_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    mv s3, sp
+; RV64I-NEXT:    addi s4, zero, 2
+; RV64I-NEXT:  .LBB206_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 0(sp)
+; RV64I-NEXT:    bge s1, a2, .LBB206_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB206_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB206_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB206_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s4
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 0(sp)
+; RV64I-NEXT:    beqz a0, .LBB206_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -8878,6 +13270,45 @@ define i64 @atomicrmw_min_i64_release(i6
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i64_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    mv s3, sp
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB207_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 0(sp)
+; RV64I-NEXT:    bge s1, a2, .LBB207_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB207_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB207_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB207_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 0(sp)
+; RV64I-NEXT:    beqz a0, .LBB207_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i64* %a, i64 %b release
   ret i64 %1
 }
@@ -9012,6 +13443,48 @@ define i64 @atomicrmw_min_i64_acq_rel(i6
 ; RV32IA-NEXT:    lw ra, 44(sp)
 ; RV32IA-NEXT:    addi sp, sp, 48
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i64_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    addi s3, sp, 8
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB208_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 8(sp)
+; RV64I-NEXT:    bge s1, a2, .LBB208_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB208_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB208_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB208_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 8(sp)
+; RV64I-NEXT:    beqz a0, .LBB208_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -9140,6 +13613,45 @@ define i64 @atomicrmw_min_i64_seq_cst(i6
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_min_i64_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    mv s3, sp
+; RV64I-NEXT:    addi s4, zero, 5
+; RV64I-NEXT:  .LBB209_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 0(sp)
+; RV64I-NEXT:    bge s1, a2, .LBB209_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB209_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB209_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB209_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s4
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 0(sp)
+; RV64I-NEXT:    beqz a0, .LBB209_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw min i64* %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -9260,6 +13772,42 @@ define i64 @atomicrmw_umax_i64_monotonic
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i64_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    addi s3, sp, 8
+; RV64I-NEXT:  .LBB210_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 8(sp)
+; RV64I-NEXT:    bltu s1, a2, .LBB210_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB210_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB210_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB210_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 8(sp)
+; RV64I-NEXT:    beqz a0, .LBB210_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -9386,6 +13934,45 @@ define i64 @atomicrmw_umax_i64_acquire(i
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i64_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    mv s3, sp
+; RV64I-NEXT:    addi s4, zero, 2
+; RV64I-NEXT:  .LBB211_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 0(sp)
+; RV64I-NEXT:    bltu s1, a2, .LBB211_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB211_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB211_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB211_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s4
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 0(sp)
+; RV64I-NEXT:    beqz a0, .LBB211_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -9512,6 +14099,45 @@ define i64 @atomicrmw_umax_i64_release(i
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i64_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    mv s3, sp
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB212_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 0(sp)
+; RV64I-NEXT:    bltu s1, a2, .LBB212_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB212_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB212_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB212_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 0(sp)
+; RV64I-NEXT:    beqz a0, .LBB212_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i64* %a, i64 %b release
   ret i64 %1
 }
@@ -9644,6 +14270,48 @@ define i64 @atomicrmw_umax_i64_acq_rel(i
 ; RV32IA-NEXT:    lw ra, 44(sp)
 ; RV32IA-NEXT:    addi sp, sp, 48
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i64_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    addi s3, sp, 8
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB213_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 8(sp)
+; RV64I-NEXT:    bltu s1, a2, .LBB213_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB213_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB213_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB213_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 8(sp)
+; RV64I-NEXT:    beqz a0, .LBB213_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -9770,6 +14438,45 @@ define i64 @atomicrmw_umax_i64_seq_cst(i
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umax_i64_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    mv s3, sp
+; RV64I-NEXT:    addi s4, zero, 5
+; RV64I-NEXT:  .LBB214_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 0(sp)
+; RV64I-NEXT:    bltu s1, a2, .LBB214_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB214_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB214_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB214_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s4
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 0(sp)
+; RV64I-NEXT:    beqz a0, .LBB214_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw umax i64* %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -9892,6 +14599,42 @@ define i64 @atomicrmw_umin_i64_monotonic
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i64_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    addi s3, sp, 8
+; RV64I-NEXT:  .LBB215_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 8(sp)
+; RV64I-NEXT:    bgeu s1, a2, .LBB215_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB215_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB215_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB215_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 8(sp)
+; RV64I-NEXT:    beqz a0, .LBB215_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -10020,6 +14763,45 @@ define i64 @atomicrmw_umin_i64_acquire(i
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i64_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    mv s3, sp
+; RV64I-NEXT:    addi s4, zero, 2
+; RV64I-NEXT:  .LBB216_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 0(sp)
+; RV64I-NEXT:    bgeu s1, a2, .LBB216_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB216_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB216_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB216_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s4
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 0(sp)
+; RV64I-NEXT:    beqz a0, .LBB216_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -10148,6 +14930,45 @@ define i64 @atomicrmw_umin_i64_release(i
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i64_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    mv s3, sp
+; RV64I-NEXT:    addi s4, zero, 3
+; RV64I-NEXT:  .LBB217_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 0(sp)
+; RV64I-NEXT:    bgeu s1, a2, .LBB217_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB217_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB217_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB217_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 0(sp)
+; RV64I-NEXT:    beqz a0, .LBB217_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i64* %a, i64 %b release
   ret i64 %1
 }
@@ -10282,6 +15103,48 @@ define i64 @atomicrmw_umin_i64_acq_rel(i
 ; RV32IA-NEXT:    lw ra, 44(sp)
 ; RV32IA-NEXT:    addi sp, sp, 48
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i64_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -64
+; RV64I-NEXT:    sd ra, 56(sp)
+; RV64I-NEXT:    sd s1, 48(sp)
+; RV64I-NEXT:    sd s2, 40(sp)
+; RV64I-NEXT:    sd s3, 32(sp)
+; RV64I-NEXT:    sd s4, 24(sp)
+; RV64I-NEXT:    sd s5, 16(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    addi s3, sp, 8
+; RV64I-NEXT:    addi s4, zero, 4
+; RV64I-NEXT:    addi s5, zero, 2
+; RV64I-NEXT:  .LBB218_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 8(sp)
+; RV64I-NEXT:    bgeu s1, a2, .LBB218_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB218_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB218_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB218_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s5
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 8(sp)
+; RV64I-NEXT:    beqz a0, .LBB218_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s5, 16(sp)
+; RV64I-NEXT:    ld s4, 24(sp)
+; RV64I-NEXT:    ld s3, 32(sp)
+; RV64I-NEXT:    ld s2, 40(sp)
+; RV64I-NEXT:    ld s1, 48(sp)
+; RV64I-NEXT:    ld ra, 56(sp)
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -10410,6 +15273,45 @@ define i64 @atomicrmw_umin_i64_seq_cst(i
 ; RV32IA-NEXT:    lw ra, 28(sp)
 ; RV32IA-NEXT:    addi sp, sp, 32
 ; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_umin_i64_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp)
+; RV64I-NEXT:    sd s1, 32(sp)
+; RV64I-NEXT:    sd s2, 24(sp)
+; RV64I-NEXT:    sd s3, 16(sp)
+; RV64I-NEXT:    sd s4, 8(sp)
+; RV64I-NEXT:    mv s1, a1
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    ld a2, 0(a0)
+; RV64I-NEXT:    mv s3, sp
+; RV64I-NEXT:    addi s4, zero, 5
+; RV64I-NEXT:  .LBB219_1: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    sd a2, 0(sp)
+; RV64I-NEXT:    bgeu s1, a2, .LBB219_3
+; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB219_1 Depth=1
+; RV64I-NEXT:    mv a2, s1
+; RV64I-NEXT:  .LBB219_3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB219_1 Depth=1
+; RV64I-NEXT:    mv a0, s2
+; RV64I-NEXT:    mv a1, s3
+; RV64I-NEXT:    mv a3, s4
+; RV64I-NEXT:    mv a4, s4
+; RV64I-NEXT:    call __atomic_compare_exchange_8
+; RV64I-NEXT:    ld a2, 0(sp)
+; RV64I-NEXT:    beqz a0, .LBB219_1
+; RV64I-NEXT:  # %bb.4: # %atomicrmw.end
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ld s4, 8(sp)
+; RV64I-NEXT:    ld s3, 16(sp)
+; RV64I-NEXT:    ld s2, 24(sp)
+; RV64I-NEXT:    ld s1, 32(sp)
+; RV64I-NEXT:    ld ra, 40(sp)
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
   %1 = atomicrmw umin i64* %a, i64 %b seq_cst
   ret i64 %1
 }
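
A note on the pattern: every RV64I expansion above for atomicrmw
max/min/umax/umin has the same shape. An initial plain load fetches the
old value, then a loop stores that value into a stack slot, selects the
new value with a single branch (blt/bge for signed max/min, bltu/bgeu
for the unsigned variants), and calls __atomic_compare_exchange_8 with
the success/failure memory orders in a3/a4. A minimal C sketch of that
expansion follows; cas_u64 is a hypothetical stand-in for the
__atomic_compare_exchange_8 libcall, with a prototype inferred from the
call-site ABI seen here rather than copied from libatomic.

  #include <stdbool.h>
  #include <stdint.h>

  /* Stand-in for the __atomic_compare_exchange_8 libcall in the CHECK
     lines: a0 = ptr, a1 = &expected, a2 = desired, a3/a4 = memory
     orders. Built on the compiler builtin so the sketch is
     self-contained. */
  static bool cas_u64(volatile uint64_t *ptr, uint64_t *expected,
                      uint64_t desired, int success_order,
                      int failure_order) {
      return __atomic_compare_exchange_n(ptr, expected, desired,
                                         /*weak=*/false,
                                         success_order, failure_order);
  }

  /* CAS-loop expansion of `atomicrmw max i64`, mirroring e.g.
     .LBB200_1 above. Orders use the C ABI numbering visible in a3/a4:
     0 = relaxed/monotonic, 2 = acquire, 3 = release, 4 = acq_rel,
     5 = seq_cst. */
  int64_t atomicrmw_max_i64(int64_t *a, int64_t b,
                            int success_order, int failure_order) {
      int64_t old = *a;              /* initial load: ld a2, 0(a0) */
      for (;;) {
          /* blt s1, a2 / mv a2, s1: keep the larger value */
          int64_t desired = (old > b) ? old : b;
          if (cas_u64((volatile uint64_t *)a, (uint64_t *)&old,
                      (uint64_t)desired, success_order, failure_order))
              return old;            /* old value returned in a0 */
          /* on failure the libcall has rewritten `old` with the value
             currently in memory (the ld a2, ...(sp) after the call),
             so the loop simply retries */
      }
  }

The five orderings differ only in the constants materialized before the
call (mv a3, zero for monotonic; addi s4, zero, 2/3/4/5 for
acquire/release/acq_rel/seq_cst), which is why the loop bodies are
otherwise identical.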