[llvm] fdac86c - [RISCV][test] Add atomicrmw test cases for suboptimal codegen report in #64090

Alex Bradbury via llvm-commits <llvm-commits@lists.llvm.org>
Tue Aug 1 07:31:27 PDT 2023


Author: Alex Bradbury
Date: 2023-08-01T15:30:09+01:00
New Revision: fdac86cce8fa7bcd4905aac25c8f533080181f30

URL: https://github.com/llvm/llvm-project/commit/fdac86cce8fa7bcd4905aac25c8f533080181f30
DIFF: https://github.com/llvm/llvm-project/commit/fdac86cce8fa7bcd4905aac25c8f533080181f30.diff

LOG: [RISCV][test] Add atomicrmw test cases for suboptimal codegen report in #64090

<https://github.com/llvm/llvm-project/issues/64090>

A forthcoming patch addresses these cases.
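
These cases currently expand to a masked LR/SC loop (see the new tests below), even though exchanging with zero simply clears the byte, which a single amoand.w with an inverted byte mask can do while preserving the neighbouring bytes. As a minimal sketch of such a lowering for the monotonic i8 case (register choices and exact sequence are illustrative, not the forthcoming patch's actual output):

    andi a1, a0, -4         # a1 = word-aligned address containing the byte
    slli a0, a0, 3          # a0 = bit offset of the byte within that word
    li a2, 255
    sll a2, a2, a0          # mask selecting the target byte
    not a2, a2              # invert: the AND preserves every other byte
    amoand.w a2, a2, (a1)   # atomically clear the byte; old word in a2
    srl a0, a2, a0          # shift the old byte value into the low bits
    ret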

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/atomic-rmw.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw.ll b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
index ec27046bf7d08f..da558029260c36 100644
--- a/llvm/test/CodeGen/RISCV/atomic-rmw.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
@@ -332,6 +332,319 @@ define i8 @atomicrmw_xchg_i8_seq_cst(ptr %a, i8 %b) nounwind {
   ret i8 %1
 }
 
+; TODO: The following 'atomicrmw xchg a, 0' cases can be lowered to an amoand
+; with an appropriate mask.
+
+define i8 @atomicrmw_xchg_0_i8_monotonic(ptr %a) nounwind {
+; RV32I-LABEL: atomicrmw_xchg_0_i8_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    li a1, 0
+; RV32I-NEXT:    li a2, 0
+; RV32I-NEXT:    call __atomic_exchange_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_xchg_0_i8_monotonic:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    andi a1, a0, -4
+; RV32IA-NEXT:    slli a0, a0, 3
+; RV32IA-NEXT:    li a2, 255
+; RV32IA-NEXT:    sll a2, a2, a0
+; RV32IA-NEXT:  .LBB5_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:    lr.w a3, (a1)
+; RV32IA-NEXT:    li a4, 0
+; RV32IA-NEXT:    xor a4, a3, a4
+; RV32IA-NEXT:    and a4, a4, a2
+; RV32IA-NEXT:    xor a4, a3, a4
+; RV32IA-NEXT:    sc.w a4, a4, (a1)
+; RV32IA-NEXT:    bnez a4, .LBB5_1
+; RV32IA-NEXT:  # %bb.2:
+; RV32IA-NEXT:    srl a0, a3, a0
+; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_0_i8_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    li a1, 0
+; RV64I-NEXT:    li a2, 0
+; RV64I-NEXT:    call __atomic_exchange_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_0_i8_monotonic:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    andi a1, a0, -4
+; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    li a2, 255
+; RV64IA-NEXT:    sllw a2, a2, a0
+; RV64IA-NEXT:  .LBB5_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:    lr.w a3, (a1)
+; RV64IA-NEXT:    li a4, 0
+; RV64IA-NEXT:    xor a4, a3, a4
+; RV64IA-NEXT:    and a4, a4, a2
+; RV64IA-NEXT:    xor a4, a3, a4
+; RV64IA-NEXT:    sc.w a4, a4, (a1)
+; RV64IA-NEXT:    bnez a4, .LBB5_1
+; RV64IA-NEXT:  # %bb.2:
+; RV64IA-NEXT:    srlw a0, a3, a0
+; RV64IA-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i8 0 monotonic
+  ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_0_i8_acquire(ptr %a) nounwind {
+; RV32I-LABEL: atomicrmw_xchg_0_i8_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    li a2, 2
+; RV32I-NEXT:    li a1, 0
+; RV32I-NEXT:    call __atomic_exchange_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_xchg_0_i8_acquire:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    andi a1, a0, -4
+; RV32IA-NEXT:    slli a0, a0, 3
+; RV32IA-NEXT:    li a2, 255
+; RV32IA-NEXT:    sll a2, a2, a0
+; RV32IA-NEXT:  .LBB6_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:    lr.w.aq a3, (a1)
+; RV32IA-NEXT:    li a4, 0
+; RV32IA-NEXT:    xor a4, a3, a4
+; RV32IA-NEXT:    and a4, a4, a2
+; RV32IA-NEXT:    xor a4, a3, a4
+; RV32IA-NEXT:    sc.w a4, a4, (a1)
+; RV32IA-NEXT:    bnez a4, .LBB6_1
+; RV32IA-NEXT:  # %bb.2:
+; RV32IA-NEXT:    srl a0, a3, a0
+; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_0_i8_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    li a2, 2
+; RV64I-NEXT:    li a1, 0
+; RV64I-NEXT:    call __atomic_exchange_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_0_i8_acquire:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    andi a1, a0, -4
+; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    li a2, 255
+; RV64IA-NEXT:    sllw a2, a2, a0
+; RV64IA-NEXT:  .LBB6_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:    lr.w.aq a3, (a1)
+; RV64IA-NEXT:    li a4, 0
+; RV64IA-NEXT:    xor a4, a3, a4
+; RV64IA-NEXT:    and a4, a4, a2
+; RV64IA-NEXT:    xor a4, a3, a4
+; RV64IA-NEXT:    sc.w a4, a4, (a1)
+; RV64IA-NEXT:    bnez a4, .LBB6_1
+; RV64IA-NEXT:  # %bb.2:
+; RV64IA-NEXT:    srlw a0, a3, a0
+; RV64IA-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i8 0 acquire
+  ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_0_i8_release(ptr %a) nounwind {
+; RV32I-LABEL: atomicrmw_xchg_0_i8_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    li a2, 3
+; RV32I-NEXT:    li a1, 0
+; RV32I-NEXT:    call __atomic_exchange_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_xchg_0_i8_release:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    andi a1, a0, -4
+; RV32IA-NEXT:    slli a0, a0, 3
+; RV32IA-NEXT:    li a2, 255
+; RV32IA-NEXT:    sll a2, a2, a0
+; RV32IA-NEXT:  .LBB7_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:    lr.w a3, (a1)
+; RV32IA-NEXT:    li a4, 0
+; RV32IA-NEXT:    xor a4, a3, a4
+; RV32IA-NEXT:    and a4, a4, a2
+; RV32IA-NEXT:    xor a4, a3, a4
+; RV32IA-NEXT:    sc.w.rl a4, a4, (a1)
+; RV32IA-NEXT:    bnez a4, .LBB7_1
+; RV32IA-NEXT:  # %bb.2:
+; RV32IA-NEXT:    srl a0, a3, a0
+; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_0_i8_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    li a2, 3
+; RV64I-NEXT:    li a1, 0
+; RV64I-NEXT:    call __atomic_exchange_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_0_i8_release:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    andi a1, a0, -4
+; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    li a2, 255
+; RV64IA-NEXT:    sllw a2, a2, a0
+; RV64IA-NEXT:  .LBB7_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:    lr.w a3, (a1)
+; RV64IA-NEXT:    li a4, 0
+; RV64IA-NEXT:    xor a4, a3, a4
+; RV64IA-NEXT:    and a4, a4, a2
+; RV64IA-NEXT:    xor a4, a3, a4
+; RV64IA-NEXT:    sc.w.rl a4, a4, (a1)
+; RV64IA-NEXT:    bnez a4, .LBB7_1
+; RV64IA-NEXT:  # %bb.2:
+; RV64IA-NEXT:    srlw a0, a3, a0
+; RV64IA-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i8 0 release
+  ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_0_i8_acq_rel(ptr %a) nounwind {
+; RV32I-LABEL: atomicrmw_xchg_0_i8_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    li a2, 4
+; RV32I-NEXT:    li a1, 0
+; RV32I-NEXT:    call __atomic_exchange_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_xchg_0_i8_acq_rel:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    andi a1, a0, -4
+; RV32IA-NEXT:    slli a0, a0, 3
+; RV32IA-NEXT:    li a2, 255
+; RV32IA-NEXT:    sll a2, a2, a0
+; RV32IA-NEXT:  .LBB8_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:    lr.w.aq a3, (a1)
+; RV32IA-NEXT:    li a4, 0
+; RV32IA-NEXT:    xor a4, a3, a4
+; RV32IA-NEXT:    and a4, a4, a2
+; RV32IA-NEXT:    xor a4, a3, a4
+; RV32IA-NEXT:    sc.w.rl a4, a4, (a1)
+; RV32IA-NEXT:    bnez a4, .LBB8_1
+; RV32IA-NEXT:  # %bb.2:
+; RV32IA-NEXT:    srl a0, a3, a0
+; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_0_i8_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    li a2, 4
+; RV64I-NEXT:    li a1, 0
+; RV64I-NEXT:    call __atomic_exchange_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_0_i8_acq_rel:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    andi a1, a0, -4
+; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    li a2, 255
+; RV64IA-NEXT:    sllw a2, a2, a0
+; RV64IA-NEXT:  .LBB8_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:    lr.w.aq a3, (a1)
+; RV64IA-NEXT:    li a4, 0
+; RV64IA-NEXT:    xor a4, a3, a4
+; RV64IA-NEXT:    and a4, a4, a2
+; RV64IA-NEXT:    xor a4, a3, a4
+; RV64IA-NEXT:    sc.w.rl a4, a4, (a1)
+; RV64IA-NEXT:    bnez a4, .LBB8_1
+; RV64IA-NEXT:  # %bb.2:
+; RV64IA-NEXT:    srlw a0, a3, a0
+; RV64IA-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i8 0 acq_rel
+  ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_0_i8_seq_cst(ptr %a) nounwind {
+; RV32I-LABEL: atomicrmw_xchg_0_i8_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    li a2, 5
+; RV32I-NEXT:    li a1, 0
+; RV32I-NEXT:    call __atomic_exchange_1@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_xchg_0_i8_seq_cst:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    andi a1, a0, -4
+; RV32IA-NEXT:    slli a0, a0, 3
+; RV32IA-NEXT:    li a2, 255
+; RV32IA-NEXT:    sll a2, a2, a0
+; RV32IA-NEXT:  .LBB9_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:    lr.w.aqrl a3, (a1)
+; RV32IA-NEXT:    li a4, 0
+; RV32IA-NEXT:    xor a4, a3, a4
+; RV32IA-NEXT:    and a4, a4, a2
+; RV32IA-NEXT:    xor a4, a3, a4
+; RV32IA-NEXT:    sc.w.rl a4, a4, (a1)
+; RV32IA-NEXT:    bnez a4, .LBB9_1
+; RV32IA-NEXT:  # %bb.2:
+; RV32IA-NEXT:    srl a0, a3, a0
+; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_0_i8_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    li a2, 5
+; RV64I-NEXT:    li a1, 0
+; RV64I-NEXT:    call __atomic_exchange_1@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_0_i8_seq_cst:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    andi a1, a0, -4
+; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    li a2, 255
+; RV64IA-NEXT:    sllw a2, a2, a0
+; RV64IA-NEXT:  .LBB9_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:    lr.w.aqrl a3, (a1)
+; RV64IA-NEXT:    li a4, 0
+; RV64IA-NEXT:    xor a4, a3, a4
+; RV64IA-NEXT:    and a4, a4, a2
+; RV64IA-NEXT:    xor a4, a3, a4
+; RV64IA-NEXT:    sc.w.rl a4, a4, (a1)
+; RV64IA-NEXT:    bnez a4, .LBB9_1
+; RV64IA-NEXT:  # %bb.2:
+; RV64IA-NEXT:    srlw a0, a3, a0
+; RV64IA-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i8 0 seq_cst
+  ret i8 %1
+}
+
 define i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-LABEL: atomicrmw_add_i8_monotonic:
 ; RV32I:       # %bb.0:
@@ -351,14 +664,14 @@ define i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB5_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB10_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a4, (a2)
 ; RV32IA-NEXT:    add a5, a4, a1
 ; RV32IA-NEXT:    xor a5, a4, a5
 ; RV32IA-NEXT:    and a5, a5, a3
 ; RV32IA-NEXT:    xor a5, a4, a5
 ; RV32IA-NEXT:    sc.w a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB5_1
+; RV32IA-NEXT:    bnez a5, .LBB10_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a0
 ; RV32IA-NEXT:    ret
@@ -381,14 +694,14 @@ define i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB5_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB10_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a4, (a2)
 ; RV64IA-NEXT:    add a5, a4, a1
 ; RV64IA-NEXT:    xor a5, a4, a5
 ; RV64IA-NEXT:    and a5, a5, a3
 ; RV64IA-NEXT:    xor a5, a4, a5
 ; RV64IA-NEXT:    sc.w a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB5_1
+; RV64IA-NEXT:    bnez a5, .LBB10_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a4, a0
 ; RV64IA-NEXT:    ret
@@ -415,14 +728,14 @@ define i8 @atomicrmw_add_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB6_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB11_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a4, (a2)
 ; RV32IA-NEXT:    add a5, a4, a1
 ; RV32IA-NEXT:    xor a5, a4, a5
 ; RV32IA-NEXT:    and a5, a5, a3
 ; RV32IA-NEXT:    xor a5, a4, a5
 ; RV32IA-NEXT:    sc.w a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB6_1
+; RV32IA-NEXT:    bnez a5, .LBB11_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a0
 ; RV32IA-NEXT:    ret
@@ -445,14 +758,14 @@ define i8 @atomicrmw_add_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB6_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB11_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a4, (a2)
 ; RV64IA-NEXT:    add a5, a4, a1
 ; RV64IA-NEXT:    xor a5, a4, a5
 ; RV64IA-NEXT:    and a5, a5, a3
 ; RV64IA-NEXT:    xor a5, a4, a5
 ; RV64IA-NEXT:    sc.w a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB6_1
+; RV64IA-NEXT:    bnez a5, .LBB11_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a4, a0
 ; RV64IA-NEXT:    ret
@@ -479,14 +792,14 @@ define i8 @atomicrmw_add_i8_release(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB7_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB12_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a4, (a2)
 ; RV32IA-NEXT:    add a5, a4, a1
 ; RV32IA-NEXT:    xor a5, a4, a5
 ; RV32IA-NEXT:    and a5, a5, a3
 ; RV32IA-NEXT:    xor a5, a4, a5
 ; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB7_1
+; RV32IA-NEXT:    bnez a5, .LBB12_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a0
 ; RV32IA-NEXT:    ret
@@ -509,14 +822,14 @@ define i8 @atomicrmw_add_i8_release(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB7_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB12_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a4, (a2)
 ; RV64IA-NEXT:    add a5, a4, a1
 ; RV64IA-NEXT:    xor a5, a4, a5
 ; RV64IA-NEXT:    and a5, a5, a3
 ; RV64IA-NEXT:    xor a5, a4, a5
 ; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB7_1
+; RV64IA-NEXT:    bnez a5, .LBB12_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a4, a0
 ; RV64IA-NEXT:    ret
@@ -543,14 +856,14 @@ define i8 @atomicrmw_add_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB8_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB13_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a4, (a2)
 ; RV32IA-NEXT:    add a5, a4, a1
 ; RV32IA-NEXT:    xor a5, a4, a5
 ; RV32IA-NEXT:    and a5, a5, a3
 ; RV32IA-NEXT:    xor a5, a4, a5
 ; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB8_1
+; RV32IA-NEXT:    bnez a5, .LBB13_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a0
 ; RV32IA-NEXT:    ret
@@ -573,14 +886,14 @@ define i8 @atomicrmw_add_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB8_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB13_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a4, (a2)
 ; RV64IA-NEXT:    add a5, a4, a1
 ; RV64IA-NEXT:    xor a5, a4, a5
 ; RV64IA-NEXT:    and a5, a5, a3
 ; RV64IA-NEXT:    xor a5, a4, a5
 ; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB8_1
+; RV64IA-NEXT:    bnez a5, .LBB13_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a4, a0
 ; RV64IA-NEXT:    ret
@@ -607,14 +920,14 @@ define i8 @atomicrmw_add_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB9_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB14_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aqrl a4, (a2)
 ; RV32IA-NEXT:    add a5, a4, a1
 ; RV32IA-NEXT:    xor a5, a4, a5
 ; RV32IA-NEXT:    and a5, a5, a3
 ; RV32IA-NEXT:    xor a5, a4, a5
 ; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB9_1
+; RV32IA-NEXT:    bnez a5, .LBB14_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a0
 ; RV32IA-NEXT:    ret
@@ -637,14 +950,14 @@ define i8 @atomicrmw_add_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB9_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB14_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aqrl a4, (a2)
 ; RV64IA-NEXT:    add a5, a4, a1
 ; RV64IA-NEXT:    xor a5, a4, a5
 ; RV64IA-NEXT:    and a5, a5, a3
 ; RV64IA-NEXT:    xor a5, a4, a5
 ; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB9_1
+; RV64IA-NEXT:    bnez a5, .LBB14_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a4, a0
 ; RV64IA-NEXT:    ret
@@ -671,14 +984,14 @@ define i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB10_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB15_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a4, (a2)
 ; RV32IA-NEXT:    sub a5, a4, a1
 ; RV32IA-NEXT:    xor a5, a4, a5
 ; RV32IA-NEXT:    and a5, a5, a3
 ; RV32IA-NEXT:    xor a5, a4, a5
 ; RV32IA-NEXT:    sc.w a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB10_1
+; RV32IA-NEXT:    bnez a5, .LBB15_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a0
 ; RV32IA-NEXT:    ret
@@ -701,14 +1014,14 @@ define i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB10_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB15_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a4, (a2)
 ; RV64IA-NEXT:    sub a5, a4, a1
 ; RV64IA-NEXT:    xor a5, a4, a5
 ; RV64IA-NEXT:    and a5, a5, a3
 ; RV64IA-NEXT:    xor a5, a4, a5
 ; RV64IA-NEXT:    sc.w a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB10_1
+; RV64IA-NEXT:    bnez a5, .LBB15_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a4, a0
 ; RV64IA-NEXT:    ret
@@ -735,14 +1048,14 @@ define i8 @atomicrmw_sub_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB11_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB16_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a4, (a2)
 ; RV32IA-NEXT:    sub a5, a4, a1
 ; RV32IA-NEXT:    xor a5, a4, a5
 ; RV32IA-NEXT:    and a5, a5, a3
 ; RV32IA-NEXT:    xor a5, a4, a5
 ; RV32IA-NEXT:    sc.w a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB11_1
+; RV32IA-NEXT:    bnez a5, .LBB16_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a0
 ; RV32IA-NEXT:    ret
@@ -765,14 +1078,14 @@ define i8 @atomicrmw_sub_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB11_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB16_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a4, (a2)
 ; RV64IA-NEXT:    sub a5, a4, a1
 ; RV64IA-NEXT:    xor a5, a4, a5
 ; RV64IA-NEXT:    and a5, a5, a3
 ; RV64IA-NEXT:    xor a5, a4, a5
 ; RV64IA-NEXT:    sc.w a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB11_1
+; RV64IA-NEXT:    bnez a5, .LBB16_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a4, a0
 ; RV64IA-NEXT:    ret
@@ -799,14 +1112,14 @@ define i8 @atomicrmw_sub_i8_release(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB12_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB17_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a4, (a2)
 ; RV32IA-NEXT:    sub a5, a4, a1
 ; RV32IA-NEXT:    xor a5, a4, a5
 ; RV32IA-NEXT:    and a5, a5, a3
 ; RV32IA-NEXT:    xor a5, a4, a5
 ; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB12_1
+; RV32IA-NEXT:    bnez a5, .LBB17_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a0
 ; RV32IA-NEXT:    ret
@@ -829,14 +1142,14 @@ define i8 @atomicrmw_sub_i8_release(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB12_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB17_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a4, (a2)
 ; RV64IA-NEXT:    sub a5, a4, a1
 ; RV64IA-NEXT:    xor a5, a4, a5
 ; RV64IA-NEXT:    and a5, a5, a3
 ; RV64IA-NEXT:    xor a5, a4, a5
 ; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB12_1
+; RV64IA-NEXT:    bnez a5, .LBB17_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a4, a0
 ; RV64IA-NEXT:    ret
@@ -863,14 +1176,14 @@ define i8 @atomicrmw_sub_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB13_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB18_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a4, (a2)
 ; RV32IA-NEXT:    sub a5, a4, a1
 ; RV32IA-NEXT:    xor a5, a4, a5
 ; RV32IA-NEXT:    and a5, a5, a3
 ; RV32IA-NEXT:    xor a5, a4, a5
 ; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB13_1
+; RV32IA-NEXT:    bnez a5, .LBB18_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a0
 ; RV32IA-NEXT:    ret
@@ -893,14 +1206,14 @@ define i8 @atomicrmw_sub_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB13_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB18_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a4, (a2)
 ; RV64IA-NEXT:    sub a5, a4, a1
 ; RV64IA-NEXT:    xor a5, a4, a5
 ; RV64IA-NEXT:    and a5, a5, a3
 ; RV64IA-NEXT:    xor a5, a4, a5
 ; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB13_1
+; RV64IA-NEXT:    bnez a5, .LBB18_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a4, a0
 ; RV64IA-NEXT:    ret
@@ -927,14 +1240,14 @@ define i8 @atomicrmw_sub_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB14_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB19_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aqrl a4, (a2)
 ; RV32IA-NEXT:    sub a5, a4, a1
 ; RV32IA-NEXT:    xor a5, a4, a5
 ; RV32IA-NEXT:    and a5, a5, a3
 ; RV32IA-NEXT:    xor a5, a4, a5
 ; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB14_1
+; RV32IA-NEXT:    bnez a5, .LBB19_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a0
 ; RV32IA-NEXT:    ret
@@ -957,14 +1270,14 @@ define i8 @atomicrmw_sub_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB14_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB19_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aqrl a4, (a2)
 ; RV64IA-NEXT:    sub a5, a4, a1
 ; RV64IA-NEXT:    xor a5, a4, a5
 ; RV64IA-NEXT:    and a5, a5, a3
 ; RV64IA-NEXT:    xor a5, a4, a5
 ; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB14_1
+; RV64IA-NEXT:    bnez a5, .LBB19_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a4, a0
 ; RV64IA-NEXT:    ret
@@ -1251,7 +1564,7 @@ define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB20_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB25_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a4, (a2)
 ; RV32IA-NEXT:    and a5, a4, a1
 ; RV32IA-NEXT:    not a5, a5
@@ -1259,7 +1572,7 @@ define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    and a5, a5, a3
 ; RV32IA-NEXT:    xor a5, a4, a5
 ; RV32IA-NEXT:    sc.w a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB20_1
+; RV32IA-NEXT:    bnez a5, .LBB25_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a0
 ; RV32IA-NEXT:    ret
@@ -1282,7 +1595,7 @@ define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB20_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB25_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a4, (a2)
 ; RV64IA-NEXT:    and a5, a4, a1
 ; RV64IA-NEXT:    not a5, a5
@@ -1290,7 +1603,7 @@ define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    and a5, a5, a3
 ; RV64IA-NEXT:    xor a5, a4, a5
 ; RV64IA-NEXT:    sc.w a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB20_1
+; RV64IA-NEXT:    bnez a5, .LBB25_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a4, a0
 ; RV64IA-NEXT:    ret
@@ -1317,7 +1630,7 @@ define i8 @atomicrmw_nand_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB21_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB26_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a4, (a2)
 ; RV32IA-NEXT:    and a5, a4, a1
 ; RV32IA-NEXT:    not a5, a5
@@ -1325,7 +1638,7 @@ define i8 @atomicrmw_nand_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    and a5, a5, a3
 ; RV32IA-NEXT:    xor a5, a4, a5
 ; RV32IA-NEXT:    sc.w a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB21_1
+; RV32IA-NEXT:    bnez a5, .LBB26_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a0
 ; RV32IA-NEXT:    ret
@@ -1348,7 +1661,7 @@ define i8 @atomicrmw_nand_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB21_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB26_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a4, (a2)
 ; RV64IA-NEXT:    and a5, a4, a1
 ; RV64IA-NEXT:    not a5, a5
@@ -1356,7 +1669,7 @@ define i8 @atomicrmw_nand_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    and a5, a5, a3
 ; RV64IA-NEXT:    xor a5, a4, a5
 ; RV64IA-NEXT:    sc.w a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB21_1
+; RV64IA-NEXT:    bnez a5, .LBB26_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a4, a0
 ; RV64IA-NEXT:    ret
@@ -1383,7 +1696,7 @@ define i8 @atomicrmw_nand_i8_release(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB22_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB27_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a4, (a2)
 ; RV32IA-NEXT:    and a5, a4, a1
 ; RV32IA-NEXT:    not a5, a5
@@ -1391,7 +1704,7 @@ define i8 @atomicrmw_nand_i8_release(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    and a5, a5, a3
 ; RV32IA-NEXT:    xor a5, a4, a5
 ; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB22_1
+; RV32IA-NEXT:    bnez a5, .LBB27_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a0
 ; RV32IA-NEXT:    ret
@@ -1414,7 +1727,7 @@ define i8 @atomicrmw_nand_i8_release(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB22_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB27_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a4, (a2)
 ; RV64IA-NEXT:    and a5, a4, a1
 ; RV64IA-NEXT:    not a5, a5
@@ -1422,7 +1735,7 @@ define i8 @atomicrmw_nand_i8_release(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    and a5, a5, a3
 ; RV64IA-NEXT:    xor a5, a4, a5
 ; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB22_1
+; RV64IA-NEXT:    bnez a5, .LBB27_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a4, a0
 ; RV64IA-NEXT:    ret
@@ -1449,7 +1762,7 @@ define i8 @atomicrmw_nand_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB23_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB28_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a4, (a2)
 ; RV32IA-NEXT:    and a5, a4, a1
 ; RV32IA-NEXT:    not a5, a5
@@ -1457,7 +1770,7 @@ define i8 @atomicrmw_nand_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    and a5, a5, a3
 ; RV32IA-NEXT:    xor a5, a4, a5
 ; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB23_1
+; RV32IA-NEXT:    bnez a5, .LBB28_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a0
 ; RV32IA-NEXT:    ret
@@ -1480,7 +1793,7 @@ define i8 @atomicrmw_nand_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB23_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB28_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a4, (a2)
 ; RV64IA-NEXT:    and a5, a4, a1
 ; RV64IA-NEXT:    not a5, a5
@@ -1488,7 +1801,7 @@ define i8 @atomicrmw_nand_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    and a5, a5, a3
 ; RV64IA-NEXT:    xor a5, a4, a5
 ; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB23_1
+; RV64IA-NEXT:    bnez a5, .LBB28_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a4, a0
 ; RV64IA-NEXT:    ret
@@ -1515,7 +1828,7 @@ define i8 @atomicrmw_nand_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB24_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB29_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aqrl a4, (a2)
 ; RV32IA-NEXT:    and a5, a4, a1
 ; RV32IA-NEXT:    not a5, a5
@@ -1523,7 +1836,7 @@ define i8 @atomicrmw_nand_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    and a5, a5, a3
 ; RV32IA-NEXT:    xor a5, a4, a5
 ; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB24_1
+; RV32IA-NEXT:    bnez a5, .LBB29_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a4, a0
 ; RV32IA-NEXT:    ret
@@ -1546,7 +1859,7 @@ define i8 @atomicrmw_nand_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB24_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB29_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aqrl a4, (a2)
 ; RV64IA-NEXT:    and a5, a4, a1
 ; RV64IA-NEXT:    not a5, a5
@@ -1554,7 +1867,7 @@ define i8 @atomicrmw_nand_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    and a5, a5, a3
 ; RV64IA-NEXT:    xor a5, a4, a5
 ; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB24_1
+; RV64IA-NEXT:    bnez a5, .LBB29_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a4, a0
 ; RV64IA-NEXT:    ret
@@ -2015,9 +2328,9 @@ define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    slli a0, a1, 24
 ; RV32I-NEXT:    srai s2, a0, 24
-; RV32I-NEXT:    j .LBB35_2
-; RV32I-NEXT:  .LBB35_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB35_2 Depth=1
+; RV32I-NEXT:    j .LBB40_2
+; RV32I-NEXT:  .LBB40_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB40_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
 ; RV32I-NEXT:    mv a0, s0
@@ -2025,18 +2338,18 @@ define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lbu a3, 15(sp)
-; RV32I-NEXT:    bnez a0, .LBB35_4
-; RV32I-NEXT:  .LBB35_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB40_4
+; RV32I-NEXT:  .LBB40_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    slli a0, a3, 24
 ; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    blt s2, a0, .LBB35_1
+; RV32I-NEXT:    blt s2, a0, .LBB40_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB35_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB40_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB35_1
-; RV32I-NEXT:  .LBB35_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB40_1
+; RV32I-NEXT:  .LBB40_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -2056,20 +2369,20 @@ define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    srai a1, a1, 24
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    xori a3, a3, 24
-; RV32IA-NEXT:  .LBB35_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB40_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a5, (a2)
 ; RV32IA-NEXT:    and a7, a5, a4
 ; RV32IA-NEXT:    mv a6, a5
 ; RV32IA-NEXT:    sll a7, a7, a3
 ; RV32IA-NEXT:    sra a7, a7, a3
-; RV32IA-NEXT:    bge a7, a1, .LBB35_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB35_1 Depth=1
+; RV32IA-NEXT:    bge a7, a1, .LBB40_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB40_1 Depth=1
 ; RV32IA-NEXT:    xor a6, a5, a1
 ; RV32IA-NEXT:    and a6, a6, a4
 ; RV32IA-NEXT:    xor a6, a5, a6
-; RV32IA-NEXT:  .LBB35_3: # in Loop: Header=BB35_1 Depth=1
+; RV32IA-NEXT:  .LBB40_3: # in Loop: Header=BB40_1 Depth=1
 ; RV32IA-NEXT:    sc.w a6, a6, (a2)
-; RV32IA-NEXT:    bnez a6, .LBB35_1
+; RV32IA-NEXT:    bnez a6, .LBB40_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a0
 ; RV32IA-NEXT:    ret
@@ -2086,9 +2399,9 @@ define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    slli a0, a1, 56
 ; RV64I-NEXT:    srai s2, a0, 56
-; RV64I-NEXT:    j .LBB35_2
-; RV64I-NEXT:  .LBB35_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB35_2 Depth=1
+; RV64I-NEXT:    j .LBB40_2
+; RV64I-NEXT:  .LBB40_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB40_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
 ; RV64I-NEXT:    mv a0, s0
@@ -2096,18 +2409,18 @@ define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lbu a3, 15(sp)
-; RV64I-NEXT:    bnez a0, .LBB35_4
-; RV64I-NEXT:  .LBB35_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB40_4
+; RV64I-NEXT:  .LBB40_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    slli a0, a3, 56
 ; RV64I-NEXT:    srai a0, a0, 56
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    blt s2, a0, .LBB35_1
+; RV64I-NEXT:    blt s2, a0, .LBB40_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB35_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB40_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB35_1
-; RV64I-NEXT:  .LBB35_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB40_1
+; RV64I-NEXT:  .LBB40_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -2127,20 +2440,20 @@ define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    srai a1, a1, 56
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    xori a3, a3, 56
-; RV64IA-NEXT:  .LBB35_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB40_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a5, (a2)
 ; RV64IA-NEXT:    and a7, a5, a4
 ; RV64IA-NEXT:    mv a6, a5
 ; RV64IA-NEXT:    sll a7, a7, a3
 ; RV64IA-NEXT:    sra a7, a7, a3
-; RV64IA-NEXT:    bge a7, a1, .LBB35_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB35_1 Depth=1
+; RV64IA-NEXT:    bge a7, a1, .LBB40_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB40_1 Depth=1
 ; RV64IA-NEXT:    xor a6, a5, a1
 ; RV64IA-NEXT:    and a6, a6, a4
 ; RV64IA-NEXT:    xor a6, a5, a6
-; RV64IA-NEXT:  .LBB35_3: # in Loop: Header=BB35_1 Depth=1
+; RV64IA-NEXT:  .LBB40_3: # in Loop: Header=BB40_1 Depth=1
 ; RV64IA-NEXT:    sc.w a6, a6, (a2)
-; RV64IA-NEXT:    bnez a6, .LBB35_1
+; RV64IA-NEXT:    bnez a6, .LBB40_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a5, a0
 ; RV64IA-NEXT:    ret
@@ -2161,9 +2474,9 @@ define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    slli a0, a1, 24
 ; RV32I-NEXT:    srai s2, a0, 24
-; RV32I-NEXT:    j .LBB36_2
-; RV32I-NEXT:  .LBB36_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB36_2 Depth=1
+; RV32I-NEXT:    j .LBB41_2
+; RV32I-NEXT:  .LBB41_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB41_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
 ; RV32I-NEXT:    li a3, 2
@@ -2171,18 +2484,18 @@ define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lbu a3, 15(sp)
-; RV32I-NEXT:    bnez a0, .LBB36_4
-; RV32I-NEXT:  .LBB36_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB41_4
+; RV32I-NEXT:  .LBB41_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    slli a0, a3, 24
 ; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    blt s2, a0, .LBB36_1
+; RV32I-NEXT:    blt s2, a0, .LBB41_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB36_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB41_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB36_1
-; RV32I-NEXT:  .LBB36_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB41_1
+; RV32I-NEXT:  .LBB41_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -2202,20 +2515,20 @@ define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    srai a1, a1, 24
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    xori a3, a3, 24
-; RV32IA-NEXT:  .LBB36_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB41_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a5, (a2)
 ; RV32IA-NEXT:    and a7, a5, a4
 ; RV32IA-NEXT:    mv a6, a5
 ; RV32IA-NEXT:    sll a7, a7, a3
 ; RV32IA-NEXT:    sra a7, a7, a3
-; RV32IA-NEXT:    bge a7, a1, .LBB36_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB36_1 Depth=1
+; RV32IA-NEXT:    bge a7, a1, .LBB41_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB41_1 Depth=1
 ; RV32IA-NEXT:    xor a6, a5, a1
 ; RV32IA-NEXT:    and a6, a6, a4
 ; RV32IA-NEXT:    xor a6, a5, a6
-; RV32IA-NEXT:  .LBB36_3: # in Loop: Header=BB36_1 Depth=1
+; RV32IA-NEXT:  .LBB41_3: # in Loop: Header=BB41_1 Depth=1
 ; RV32IA-NEXT:    sc.w a6, a6, (a2)
-; RV32IA-NEXT:    bnez a6, .LBB36_1
+; RV32IA-NEXT:    bnez a6, .LBB41_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a0
 ; RV32IA-NEXT:    ret
@@ -2232,9 +2545,9 @@ define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    slli a0, a1, 56
 ; RV64I-NEXT:    srai s2, a0, 56
-; RV64I-NEXT:    j .LBB36_2
-; RV64I-NEXT:  .LBB36_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB36_2 Depth=1
+; RV64I-NEXT:    j .LBB41_2
+; RV64I-NEXT:  .LBB41_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB41_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
 ; RV64I-NEXT:    li a3, 2
@@ -2242,18 +2555,18 @@ define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lbu a3, 15(sp)
-; RV64I-NEXT:    bnez a0, .LBB36_4
-; RV64I-NEXT:  .LBB36_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB41_4
+; RV64I-NEXT:  .LBB41_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    slli a0, a3, 56
 ; RV64I-NEXT:    srai a0, a0, 56
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    blt s2, a0, .LBB36_1
+; RV64I-NEXT:    blt s2, a0, .LBB41_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB36_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB41_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB36_1
-; RV64I-NEXT:  .LBB36_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB41_1
+; RV64I-NEXT:  .LBB41_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -2273,20 +2586,20 @@ define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    srai a1, a1, 56
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    xori a3, a3, 56
-; RV64IA-NEXT:  .LBB36_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB41_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a5, (a2)
 ; RV64IA-NEXT:    and a7, a5, a4
 ; RV64IA-NEXT:    mv a6, a5
 ; RV64IA-NEXT:    sll a7, a7, a3
 ; RV64IA-NEXT:    sra a7, a7, a3
-; RV64IA-NEXT:    bge a7, a1, .LBB36_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB36_1 Depth=1
+; RV64IA-NEXT:    bge a7, a1, .LBB41_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB41_1 Depth=1
 ; RV64IA-NEXT:    xor a6, a5, a1
 ; RV64IA-NEXT:    and a6, a6, a4
 ; RV64IA-NEXT:    xor a6, a5, a6
-; RV64IA-NEXT:  .LBB36_3: # in Loop: Header=BB36_1 Depth=1
+; RV64IA-NEXT:  .LBB41_3: # in Loop: Header=BB41_1 Depth=1
 ; RV64IA-NEXT:    sc.w a6, a6, (a2)
-; RV64IA-NEXT:    bnez a6, .LBB36_1
+; RV64IA-NEXT:    bnez a6, .LBB41_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a5, a0
 ; RV64IA-NEXT:    ret
@@ -2307,9 +2620,9 @@ define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    slli a0, a1, 24
 ; RV32I-NEXT:    srai s2, a0, 24
-; RV32I-NEXT:    j .LBB37_2
-; RV32I-NEXT:  .LBB37_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB37_2 Depth=1
+; RV32I-NEXT:    j .LBB42_2
+; RV32I-NEXT:  .LBB42_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB42_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
 ; RV32I-NEXT:    li a3, 3
@@ -2317,18 +2630,18 @@ define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lbu a3, 15(sp)
-; RV32I-NEXT:    bnez a0, .LBB37_4
-; RV32I-NEXT:  .LBB37_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB42_4
+; RV32I-NEXT:  .LBB42_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    slli a0, a3, 24
 ; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    blt s2, a0, .LBB37_1
+; RV32I-NEXT:    blt s2, a0, .LBB42_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB37_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB42_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB37_1
-; RV32I-NEXT:  .LBB37_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB42_1
+; RV32I-NEXT:  .LBB42_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -2348,20 +2661,20 @@ define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    srai a1, a1, 24
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    xori a3, a3, 24
-; RV32IA-NEXT:  .LBB37_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB42_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a5, (a2)
 ; RV32IA-NEXT:    and a7, a5, a4
 ; RV32IA-NEXT:    mv a6, a5
 ; RV32IA-NEXT:    sll a7, a7, a3
 ; RV32IA-NEXT:    sra a7, a7, a3
-; RV32IA-NEXT:    bge a7, a1, .LBB37_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB37_1 Depth=1
+; RV32IA-NEXT:    bge a7, a1, .LBB42_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB42_1 Depth=1
 ; RV32IA-NEXT:    xor a6, a5, a1
 ; RV32IA-NEXT:    and a6, a6, a4
 ; RV32IA-NEXT:    xor a6, a5, a6
-; RV32IA-NEXT:  .LBB37_3: # in Loop: Header=BB37_1 Depth=1
+; RV32IA-NEXT:  .LBB42_3: # in Loop: Header=BB42_1 Depth=1
 ; RV32IA-NEXT:    sc.w.rl a6, a6, (a2)
-; RV32IA-NEXT:    bnez a6, .LBB37_1
+; RV32IA-NEXT:    bnez a6, .LBB42_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a0
 ; RV32IA-NEXT:    ret
@@ -2378,9 +2691,9 @@ define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    slli a0, a1, 56
 ; RV64I-NEXT:    srai s2, a0, 56
-; RV64I-NEXT:    j .LBB37_2
-; RV64I-NEXT:  .LBB37_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB37_2 Depth=1
+; RV64I-NEXT:    j .LBB42_2
+; RV64I-NEXT:  .LBB42_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB42_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
 ; RV64I-NEXT:    li a3, 3
@@ -2388,18 +2701,18 @@ define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lbu a3, 15(sp)
-; RV64I-NEXT:    bnez a0, .LBB37_4
-; RV64I-NEXT:  .LBB37_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB42_4
+; RV64I-NEXT:  .LBB42_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    slli a0, a3, 56
 ; RV64I-NEXT:    srai a0, a0, 56
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    blt s2, a0, .LBB37_1
+; RV64I-NEXT:    blt s2, a0, .LBB42_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB37_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB42_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB37_1
-; RV64I-NEXT:  .LBB37_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB42_1
+; RV64I-NEXT:  .LBB42_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -2419,20 +2732,20 @@ define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    srai a1, a1, 56
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    xori a3, a3, 56
-; RV64IA-NEXT:  .LBB37_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB42_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a5, (a2)
 ; RV64IA-NEXT:    and a7, a5, a4
 ; RV64IA-NEXT:    mv a6, a5
 ; RV64IA-NEXT:    sll a7, a7, a3
 ; RV64IA-NEXT:    sra a7, a7, a3
-; RV64IA-NEXT:    bge a7, a1, .LBB37_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB37_1 Depth=1
+; RV64IA-NEXT:    bge a7, a1, .LBB42_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB42_1 Depth=1
 ; RV64IA-NEXT:    xor a6, a5, a1
 ; RV64IA-NEXT:    and a6, a6, a4
 ; RV64IA-NEXT:    xor a6, a5, a6
-; RV64IA-NEXT:  .LBB37_3: # in Loop: Header=BB37_1 Depth=1
+; RV64IA-NEXT:  .LBB42_3: # in Loop: Header=BB42_1 Depth=1
 ; RV64IA-NEXT:    sc.w.rl a6, a6, (a2)
-; RV64IA-NEXT:    bnez a6, .LBB37_1
+; RV64IA-NEXT:    bnez a6, .LBB42_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a5, a0
 ; RV64IA-NEXT:    ret
@@ -2453,9 +2766,9 @@ define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    slli a0, a1, 24
 ; RV32I-NEXT:    srai s2, a0, 24
-; RV32I-NEXT:    j .LBB38_2
-; RV32I-NEXT:  .LBB38_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB38_2 Depth=1
+; RV32I-NEXT:    j .LBB43_2
+; RV32I-NEXT:  .LBB43_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB43_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
 ; RV32I-NEXT:    li a3, 4
@@ -2463,18 +2776,18 @@ define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lbu a3, 15(sp)
-; RV32I-NEXT:    bnez a0, .LBB38_4
-; RV32I-NEXT:  .LBB38_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB43_4
+; RV32I-NEXT:  .LBB43_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    slli a0, a3, 24
 ; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    blt s2, a0, .LBB38_1
+; RV32I-NEXT:    blt s2, a0, .LBB43_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB38_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB43_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB38_1
-; RV32I-NEXT:  .LBB38_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB43_1
+; RV32I-NEXT:  .LBB43_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -2494,20 +2807,20 @@ define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    srai a1, a1, 24
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    xori a3, a3, 24
-; RV32IA-NEXT:  .LBB38_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB43_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a5, (a2)
 ; RV32IA-NEXT:    and a7, a5, a4
 ; RV32IA-NEXT:    mv a6, a5
 ; RV32IA-NEXT:    sll a7, a7, a3
 ; RV32IA-NEXT:    sra a7, a7, a3
-; RV32IA-NEXT:    bge a7, a1, .LBB38_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB38_1 Depth=1
+; RV32IA-NEXT:    bge a7, a1, .LBB43_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB43_1 Depth=1
 ; RV32IA-NEXT:    xor a6, a5, a1
 ; RV32IA-NEXT:    and a6, a6, a4
 ; RV32IA-NEXT:    xor a6, a5, a6
-; RV32IA-NEXT:  .LBB38_3: # in Loop: Header=BB38_1 Depth=1
+; RV32IA-NEXT:  .LBB43_3: # in Loop: Header=BB43_1 Depth=1
 ; RV32IA-NEXT:    sc.w.rl a6, a6, (a2)
-; RV32IA-NEXT:    bnez a6, .LBB38_1
+; RV32IA-NEXT:    bnez a6, .LBB43_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a0
 ; RV32IA-NEXT:    ret
@@ -2524,9 +2837,9 @@ define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    slli a0, a1, 56
 ; RV64I-NEXT:    srai s2, a0, 56
-; RV64I-NEXT:    j .LBB38_2
-; RV64I-NEXT:  .LBB38_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB38_2 Depth=1
+; RV64I-NEXT:    j .LBB43_2
+; RV64I-NEXT:  .LBB43_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB43_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
 ; RV64I-NEXT:    li a3, 4
@@ -2534,18 +2847,18 @@ define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lbu a3, 15(sp)
-; RV64I-NEXT:    bnez a0, .LBB38_4
-; RV64I-NEXT:  .LBB38_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB43_4
+; RV64I-NEXT:  .LBB43_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    slli a0, a3, 56
 ; RV64I-NEXT:    srai a0, a0, 56
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    blt s2, a0, .LBB38_1
+; RV64I-NEXT:    blt s2, a0, .LBB43_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB38_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB43_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB38_1
-; RV64I-NEXT:  .LBB38_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB43_1
+; RV64I-NEXT:  .LBB43_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -2565,20 +2878,20 @@ define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    srai a1, a1, 56
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    xori a3, a3, 56
-; RV64IA-NEXT:  .LBB38_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB43_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a5, (a2)
 ; RV64IA-NEXT:    and a7, a5, a4
 ; RV64IA-NEXT:    mv a6, a5
 ; RV64IA-NEXT:    sll a7, a7, a3
 ; RV64IA-NEXT:    sra a7, a7, a3
-; RV64IA-NEXT:    bge a7, a1, .LBB38_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB38_1 Depth=1
+; RV64IA-NEXT:    bge a7, a1, .LBB43_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB43_1 Depth=1
 ; RV64IA-NEXT:    xor a6, a5, a1
 ; RV64IA-NEXT:    and a6, a6, a4
 ; RV64IA-NEXT:    xor a6, a5, a6
-; RV64IA-NEXT:  .LBB38_3: # in Loop: Header=BB38_1 Depth=1
+; RV64IA-NEXT:  .LBB43_3: # in Loop: Header=BB43_1 Depth=1
 ; RV64IA-NEXT:    sc.w.rl a6, a6, (a2)
-; RV64IA-NEXT:    bnez a6, .LBB38_1
+; RV64IA-NEXT:    bnez a6, .LBB43_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a5, a0
 ; RV64IA-NEXT:    ret
@@ -2599,9 +2912,9 @@ define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    slli a0, a1, 24
 ; RV32I-NEXT:    srai s2, a0, 24
-; RV32I-NEXT:    j .LBB39_2
-; RV32I-NEXT:  .LBB39_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB39_2 Depth=1
+; RV32I-NEXT:    j .LBB44_2
+; RV32I-NEXT:  .LBB44_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB44_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
 ; RV32I-NEXT:    li a3, 5
@@ -2609,18 +2922,18 @@ define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lbu a3, 15(sp)
-; RV32I-NEXT:    bnez a0, .LBB39_4
-; RV32I-NEXT:  .LBB39_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB44_4
+; RV32I-NEXT:  .LBB44_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    slli a0, a3, 24
 ; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    blt s2, a0, .LBB39_1
+; RV32I-NEXT:    blt s2, a0, .LBB44_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB39_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB44_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB39_1
-; RV32I-NEXT:  .LBB39_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB44_1
+; RV32I-NEXT:  .LBB44_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -2640,20 +2953,20 @@ define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    srai a1, a1, 24
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    xori a3, a3, 24
-; RV32IA-NEXT:  .LBB39_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB44_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aqrl a5, (a2)
 ; RV32IA-NEXT:    and a7, a5, a4
 ; RV32IA-NEXT:    mv a6, a5
 ; RV32IA-NEXT:    sll a7, a7, a3
 ; RV32IA-NEXT:    sra a7, a7, a3
-; RV32IA-NEXT:    bge a7, a1, .LBB39_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB39_1 Depth=1
+; RV32IA-NEXT:    bge a7, a1, .LBB44_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB44_1 Depth=1
 ; RV32IA-NEXT:    xor a6, a5, a1
 ; RV32IA-NEXT:    and a6, a6, a4
 ; RV32IA-NEXT:    xor a6, a5, a6
-; RV32IA-NEXT:  .LBB39_3: # in Loop: Header=BB39_1 Depth=1
+; RV32IA-NEXT:  .LBB44_3: # in Loop: Header=BB44_1 Depth=1
 ; RV32IA-NEXT:    sc.w.rl a6, a6, (a2)
-; RV32IA-NEXT:    bnez a6, .LBB39_1
+; RV32IA-NEXT:    bnez a6, .LBB44_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a0
 ; RV32IA-NEXT:    ret
@@ -2670,9 +2983,9 @@ define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    slli a0, a1, 56
 ; RV64I-NEXT:    srai s2, a0, 56
-; RV64I-NEXT:    j .LBB39_2
-; RV64I-NEXT:  .LBB39_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB39_2 Depth=1
+; RV64I-NEXT:    j .LBB44_2
+; RV64I-NEXT:  .LBB44_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB44_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
 ; RV64I-NEXT:    li a3, 5
@@ -2680,18 +2993,18 @@ define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lbu a3, 15(sp)
-; RV64I-NEXT:    bnez a0, .LBB39_4
-; RV64I-NEXT:  .LBB39_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB44_4
+; RV64I-NEXT:  .LBB44_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    slli a0, a3, 56
 ; RV64I-NEXT:    srai a0, a0, 56
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    blt s2, a0, .LBB39_1
+; RV64I-NEXT:    blt s2, a0, .LBB44_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB39_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB44_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB39_1
-; RV64I-NEXT:  .LBB39_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB44_1
+; RV64I-NEXT:  .LBB44_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -2711,20 +3024,20 @@ define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    srai a1, a1, 56
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    xori a3, a3, 56
-; RV64IA-NEXT:  .LBB39_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB44_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aqrl a5, (a2)
 ; RV64IA-NEXT:    and a7, a5, a4
 ; RV64IA-NEXT:    mv a6, a5
 ; RV64IA-NEXT:    sll a7, a7, a3
 ; RV64IA-NEXT:    sra a7, a7, a3
-; RV64IA-NEXT:    bge a7, a1, .LBB39_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB39_1 Depth=1
+; RV64IA-NEXT:    bge a7, a1, .LBB44_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB44_1 Depth=1
 ; RV64IA-NEXT:    xor a6, a5, a1
 ; RV64IA-NEXT:    and a6, a6, a4
 ; RV64IA-NEXT:    xor a6, a5, a6
-; RV64IA-NEXT:  .LBB39_3: # in Loop: Header=BB39_1 Depth=1
+; RV64IA-NEXT:  .LBB44_3: # in Loop: Header=BB44_1 Depth=1
 ; RV64IA-NEXT:    sc.w.rl a6, a6, (a2)
-; RV64IA-NEXT:    bnez a6, .LBB39_1
+; RV64IA-NEXT:    bnez a6, .LBB44_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a5, a0
 ; RV64IA-NEXT:    ret
@@ -2745,9 +3058,9 @@ define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    slli a0, a1, 24
 ; RV32I-NEXT:    srai s2, a0, 24
-; RV32I-NEXT:    j .LBB40_2
-; RV32I-NEXT:  .LBB40_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB40_2 Depth=1
+; RV32I-NEXT:    j .LBB45_2
+; RV32I-NEXT:  .LBB45_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB45_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
 ; RV32I-NEXT:    mv a0, s0
@@ -2755,18 +3068,18 @@ define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lbu a3, 15(sp)
-; RV32I-NEXT:    bnez a0, .LBB40_4
-; RV32I-NEXT:  .LBB40_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB45_4
+; RV32I-NEXT:  .LBB45_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    slli a0, a3, 24
 ; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bge s2, a0, .LBB40_1
+; RV32I-NEXT:    bge s2, a0, .LBB45_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB40_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB45_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB40_1
-; RV32I-NEXT:  .LBB40_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB45_1
+; RV32I-NEXT:  .LBB45_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -2786,20 +3099,20 @@ define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    srai a1, a1, 24
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    xori a3, a3, 24
-; RV32IA-NEXT:  .LBB40_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB45_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a5, (a2)
 ; RV32IA-NEXT:    and a7, a5, a4
 ; RV32IA-NEXT:    mv a6, a5
 ; RV32IA-NEXT:    sll a7, a7, a3
 ; RV32IA-NEXT:    sra a7, a7, a3
-; RV32IA-NEXT:    bge a1, a7, .LBB40_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB40_1 Depth=1
+; RV32IA-NEXT:    bge a1, a7, .LBB45_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB45_1 Depth=1
 ; RV32IA-NEXT:    xor a6, a5, a1
 ; RV32IA-NEXT:    and a6, a6, a4
 ; RV32IA-NEXT:    xor a6, a5, a6
-; RV32IA-NEXT:  .LBB40_3: # in Loop: Header=BB40_1 Depth=1
+; RV32IA-NEXT:  .LBB45_3: # in Loop: Header=BB45_1 Depth=1
 ; RV32IA-NEXT:    sc.w a6, a6, (a2)
-; RV32IA-NEXT:    bnez a6, .LBB40_1
+; RV32IA-NEXT:    bnez a6, .LBB45_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a0
 ; RV32IA-NEXT:    ret
@@ -2816,9 +3129,9 @@ define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    slli a0, a1, 56
 ; RV64I-NEXT:    srai s2, a0, 56
-; RV64I-NEXT:    j .LBB40_2
-; RV64I-NEXT:  .LBB40_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB40_2 Depth=1
+; RV64I-NEXT:    j .LBB45_2
+; RV64I-NEXT:  .LBB45_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB45_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
 ; RV64I-NEXT:    mv a0, s0
@@ -2826,18 +3139,18 @@ define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lbu a3, 15(sp)
-; RV64I-NEXT:    bnez a0, .LBB40_4
-; RV64I-NEXT:  .LBB40_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB45_4
+; RV64I-NEXT:  .LBB45_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    slli a0, a3, 56
 ; RV64I-NEXT:    srai a0, a0, 56
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bge s2, a0, .LBB40_1
+; RV64I-NEXT:    bge s2, a0, .LBB45_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB40_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB45_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB40_1
-; RV64I-NEXT:  .LBB40_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB45_1
+; RV64I-NEXT:  .LBB45_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -2857,20 +3170,20 @@ define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    srai a1, a1, 56
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    xori a3, a3, 56
-; RV64IA-NEXT:  .LBB40_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB45_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a5, (a2)
 ; RV64IA-NEXT:    and a7, a5, a4
 ; RV64IA-NEXT:    mv a6, a5
 ; RV64IA-NEXT:    sll a7, a7, a3
 ; RV64IA-NEXT:    sra a7, a7, a3
-; RV64IA-NEXT:    bge a1, a7, .LBB40_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB40_1 Depth=1
+; RV64IA-NEXT:    bge a1, a7, .LBB45_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB45_1 Depth=1
 ; RV64IA-NEXT:    xor a6, a5, a1
 ; RV64IA-NEXT:    and a6, a6, a4
 ; RV64IA-NEXT:    xor a6, a5, a6
-; RV64IA-NEXT:  .LBB40_3: # in Loop: Header=BB40_1 Depth=1
+; RV64IA-NEXT:  .LBB45_3: # in Loop: Header=BB45_1 Depth=1
 ; RV64IA-NEXT:    sc.w a6, a6, (a2)
-; RV64IA-NEXT:    bnez a6, .LBB40_1
+; RV64IA-NEXT:    bnez a6, .LBB45_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a5, a0
 ; RV64IA-NEXT:    ret
@@ -2891,9 +3204,9 @@ define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    slli a0, a1, 24
 ; RV32I-NEXT:    srai s2, a0, 24
-; RV32I-NEXT:    j .LBB41_2
-; RV32I-NEXT:  .LBB41_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB41_2 Depth=1
+; RV32I-NEXT:    j .LBB46_2
+; RV32I-NEXT:  .LBB46_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB46_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
 ; RV32I-NEXT:    li a3, 2
@@ -2901,18 +3214,18 @@ define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lbu a3, 15(sp)
-; RV32I-NEXT:    bnez a0, .LBB41_4
-; RV32I-NEXT:  .LBB41_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB46_4
+; RV32I-NEXT:  .LBB46_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    slli a0, a3, 24
 ; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bge s2, a0, .LBB41_1
+; RV32I-NEXT:    bge s2, a0, .LBB46_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB41_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB46_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB41_1
-; RV32I-NEXT:  .LBB41_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB46_1
+; RV32I-NEXT:  .LBB46_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -2932,20 +3245,20 @@ define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    srai a1, a1, 24
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    xori a3, a3, 24
-; RV32IA-NEXT:  .LBB41_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB46_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a5, (a2)
 ; RV32IA-NEXT:    and a7, a5, a4
 ; RV32IA-NEXT:    mv a6, a5
 ; RV32IA-NEXT:    sll a7, a7, a3
 ; RV32IA-NEXT:    sra a7, a7, a3
-; RV32IA-NEXT:    bge a1, a7, .LBB41_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB41_1 Depth=1
+; RV32IA-NEXT:    bge a1, a7, .LBB46_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB46_1 Depth=1
 ; RV32IA-NEXT:    xor a6, a5, a1
 ; RV32IA-NEXT:    and a6, a6, a4
 ; RV32IA-NEXT:    xor a6, a5, a6
-; RV32IA-NEXT:  .LBB41_3: # in Loop: Header=BB41_1 Depth=1
+; RV32IA-NEXT:  .LBB46_3: # in Loop: Header=BB46_1 Depth=1
 ; RV32IA-NEXT:    sc.w a6, a6, (a2)
-; RV32IA-NEXT:    bnez a6, .LBB41_1
+; RV32IA-NEXT:    bnez a6, .LBB46_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a0
 ; RV32IA-NEXT:    ret
@@ -2962,9 +3275,9 @@ define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    slli a0, a1, 56
 ; RV64I-NEXT:    srai s2, a0, 56
-; RV64I-NEXT:    j .LBB41_2
-; RV64I-NEXT:  .LBB41_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB41_2 Depth=1
+; RV64I-NEXT:    j .LBB46_2
+; RV64I-NEXT:  .LBB46_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB46_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
 ; RV64I-NEXT:    li a3, 2
@@ -2972,18 +3285,18 @@ define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lbu a3, 15(sp)
-; RV64I-NEXT:    bnez a0, .LBB41_4
-; RV64I-NEXT:  .LBB41_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB46_4
+; RV64I-NEXT:  .LBB46_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    slli a0, a3, 56
 ; RV64I-NEXT:    srai a0, a0, 56
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bge s2, a0, .LBB41_1
+; RV64I-NEXT:    bge s2, a0, .LBB46_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB41_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB46_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB41_1
-; RV64I-NEXT:  .LBB41_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB46_1
+; RV64I-NEXT:  .LBB46_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -3003,20 +3316,20 @@ define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    srai a1, a1, 56
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    xori a3, a3, 56
-; RV64IA-NEXT:  .LBB41_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB46_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a5, (a2)
 ; RV64IA-NEXT:    and a7, a5, a4
 ; RV64IA-NEXT:    mv a6, a5
 ; RV64IA-NEXT:    sll a7, a7, a3
 ; RV64IA-NEXT:    sra a7, a7, a3
-; RV64IA-NEXT:    bge a1, a7, .LBB41_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB41_1 Depth=1
+; RV64IA-NEXT:    bge a1, a7, .LBB46_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB46_1 Depth=1
 ; RV64IA-NEXT:    xor a6, a5, a1
 ; RV64IA-NEXT:    and a6, a6, a4
 ; RV64IA-NEXT:    xor a6, a5, a6
-; RV64IA-NEXT:  .LBB41_3: # in Loop: Header=BB41_1 Depth=1
+; RV64IA-NEXT:  .LBB46_3: # in Loop: Header=BB46_1 Depth=1
 ; RV64IA-NEXT:    sc.w a6, a6, (a2)
-; RV64IA-NEXT:    bnez a6, .LBB41_1
+; RV64IA-NEXT:    bnez a6, .LBB46_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a5, a0
 ; RV64IA-NEXT:    ret
@@ -3037,9 +3350,9 @@ define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    slli a0, a1, 24
 ; RV32I-NEXT:    srai s2, a0, 24
-; RV32I-NEXT:    j .LBB42_2
-; RV32I-NEXT:  .LBB42_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB42_2 Depth=1
+; RV32I-NEXT:    j .LBB47_2
+; RV32I-NEXT:  .LBB47_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB47_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
 ; RV32I-NEXT:    li a3, 3
@@ -3047,18 +3360,18 @@ define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lbu a3, 15(sp)
-; RV32I-NEXT:    bnez a0, .LBB42_4
-; RV32I-NEXT:  .LBB42_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB47_4
+; RV32I-NEXT:  .LBB47_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    slli a0, a3, 24
 ; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bge s2, a0, .LBB42_1
+; RV32I-NEXT:    bge s2, a0, .LBB47_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB42_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB47_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB42_1
-; RV32I-NEXT:  .LBB42_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB47_1
+; RV32I-NEXT:  .LBB47_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -3078,20 +3391,20 @@ define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    srai a1, a1, 24
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    xori a3, a3, 24
-; RV32IA-NEXT:  .LBB42_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB47_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a5, (a2)
 ; RV32IA-NEXT:    and a7, a5, a4
 ; RV32IA-NEXT:    mv a6, a5
 ; RV32IA-NEXT:    sll a7, a7, a3
 ; RV32IA-NEXT:    sra a7, a7, a3
-; RV32IA-NEXT:    bge a1, a7, .LBB42_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB42_1 Depth=1
+; RV32IA-NEXT:    bge a1, a7, .LBB47_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB47_1 Depth=1
 ; RV32IA-NEXT:    xor a6, a5, a1
 ; RV32IA-NEXT:    and a6, a6, a4
 ; RV32IA-NEXT:    xor a6, a5, a6
-; RV32IA-NEXT:  .LBB42_3: # in Loop: Header=BB42_1 Depth=1
+; RV32IA-NEXT:  .LBB47_3: # in Loop: Header=BB47_1 Depth=1
 ; RV32IA-NEXT:    sc.w.rl a6, a6, (a2)
-; RV32IA-NEXT:    bnez a6, .LBB42_1
+; RV32IA-NEXT:    bnez a6, .LBB47_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a0
 ; RV32IA-NEXT:    ret
@@ -3108,9 +3421,9 @@ define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    slli a0, a1, 56
 ; RV64I-NEXT:    srai s2, a0, 56
-; RV64I-NEXT:    j .LBB42_2
-; RV64I-NEXT:  .LBB42_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB42_2 Depth=1
+; RV64I-NEXT:    j .LBB47_2
+; RV64I-NEXT:  .LBB47_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB47_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
 ; RV64I-NEXT:    li a3, 3
@@ -3118,18 +3431,18 @@ define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lbu a3, 15(sp)
-; RV64I-NEXT:    bnez a0, .LBB42_4
-; RV64I-NEXT:  .LBB42_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB47_4
+; RV64I-NEXT:  .LBB47_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    slli a0, a3, 56
 ; RV64I-NEXT:    srai a0, a0, 56
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bge s2, a0, .LBB42_1
+; RV64I-NEXT:    bge s2, a0, .LBB47_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB42_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB47_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB42_1
-; RV64I-NEXT:  .LBB42_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB47_1
+; RV64I-NEXT:  .LBB47_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -3149,20 +3462,20 @@ define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    srai a1, a1, 56
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    xori a3, a3, 56
-; RV64IA-NEXT:  .LBB42_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB47_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a5, (a2)
 ; RV64IA-NEXT:    and a7, a5, a4
 ; RV64IA-NEXT:    mv a6, a5
 ; RV64IA-NEXT:    sll a7, a7, a3
 ; RV64IA-NEXT:    sra a7, a7, a3
-; RV64IA-NEXT:    bge a1, a7, .LBB42_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB42_1 Depth=1
+; RV64IA-NEXT:    bge a1, a7, .LBB47_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB47_1 Depth=1
 ; RV64IA-NEXT:    xor a6, a5, a1
 ; RV64IA-NEXT:    and a6, a6, a4
 ; RV64IA-NEXT:    xor a6, a5, a6
-; RV64IA-NEXT:  .LBB42_3: # in Loop: Header=BB42_1 Depth=1
+; RV64IA-NEXT:  .LBB47_3: # in Loop: Header=BB47_1 Depth=1
 ; RV64IA-NEXT:    sc.w.rl a6, a6, (a2)
-; RV64IA-NEXT:    bnez a6, .LBB42_1
+; RV64IA-NEXT:    bnez a6, .LBB47_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a5, a0
 ; RV64IA-NEXT:    ret
@@ -3183,9 +3496,9 @@ define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    slli a0, a1, 24
 ; RV32I-NEXT:    srai s2, a0, 24
-; RV32I-NEXT:    j .LBB43_2
-; RV32I-NEXT:  .LBB43_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB43_2 Depth=1
+; RV32I-NEXT:    j .LBB48_2
+; RV32I-NEXT:  .LBB48_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB48_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
 ; RV32I-NEXT:    li a3, 4
@@ -3193,18 +3506,18 @@ define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lbu a3, 15(sp)
-; RV32I-NEXT:    bnez a0, .LBB43_4
-; RV32I-NEXT:  .LBB43_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB48_4
+; RV32I-NEXT:  .LBB48_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    slli a0, a3, 24
 ; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bge s2, a0, .LBB43_1
+; RV32I-NEXT:    bge s2, a0, .LBB48_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB43_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB48_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB43_1
-; RV32I-NEXT:  .LBB43_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB48_1
+; RV32I-NEXT:  .LBB48_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -3224,20 +3537,20 @@ define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    srai a1, a1, 24
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    xori a3, a3, 24
-; RV32IA-NEXT:  .LBB43_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB48_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a5, (a2)
 ; RV32IA-NEXT:    and a7, a5, a4
 ; RV32IA-NEXT:    mv a6, a5
 ; RV32IA-NEXT:    sll a7, a7, a3
 ; RV32IA-NEXT:    sra a7, a7, a3
-; RV32IA-NEXT:    bge a1, a7, .LBB43_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB43_1 Depth=1
+; RV32IA-NEXT:    bge a1, a7, .LBB48_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB48_1 Depth=1
 ; RV32IA-NEXT:    xor a6, a5, a1
 ; RV32IA-NEXT:    and a6, a6, a4
 ; RV32IA-NEXT:    xor a6, a5, a6
-; RV32IA-NEXT:  .LBB43_3: # in Loop: Header=BB43_1 Depth=1
+; RV32IA-NEXT:  .LBB48_3: # in Loop: Header=BB48_1 Depth=1
 ; RV32IA-NEXT:    sc.w.rl a6, a6, (a2)
-; RV32IA-NEXT:    bnez a6, .LBB43_1
+; RV32IA-NEXT:    bnez a6, .LBB48_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a0
 ; RV32IA-NEXT:    ret
@@ -3254,28 +3567,28 @@ define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    slli a0, a1, 56
 ; RV64I-NEXT:    srai s2, a0, 56
-; RV64I-NEXT:    j .LBB43_2
-; RV64I-NEXT:  .LBB43_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB43_2 Depth=1
-; RV64I-NEXT:    sb a3, 15(sp)
-; RV64I-NEXT:    addi a1, sp, 15
+; RV64I-NEXT:    j .LBB48_2
+; RV64I-NEXT:  .LBB48_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB48_2 Depth=1
+; RV64I-NEXT:    sb a3, 15(sp)
+; RV64I-NEXT:    addi a1, sp, 15
 ; RV64I-NEXT:    li a3, 4
 ; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lbu a3, 15(sp)
-; RV64I-NEXT:    bnez a0, .LBB43_4
-; RV64I-NEXT:  .LBB43_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB48_4
+; RV64I-NEXT:  .LBB48_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    slli a0, a3, 56
 ; RV64I-NEXT:    srai a0, a0, 56
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bge s2, a0, .LBB43_1
+; RV64I-NEXT:    bge s2, a0, .LBB48_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB43_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB48_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB43_1
-; RV64I-NEXT:  .LBB43_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB48_1
+; RV64I-NEXT:  .LBB48_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -3295,20 +3608,20 @@ define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    srai a1, a1, 56
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    xori a3, a3, 56
-; RV64IA-NEXT:  .LBB43_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB48_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a5, (a2)
 ; RV64IA-NEXT:    and a7, a5, a4
 ; RV64IA-NEXT:    mv a6, a5
 ; RV64IA-NEXT:    sll a7, a7, a3
 ; RV64IA-NEXT:    sra a7, a7, a3
-; RV64IA-NEXT:    bge a1, a7, .LBB43_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB43_1 Depth=1
+; RV64IA-NEXT:    bge a1, a7, .LBB48_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB48_1 Depth=1
 ; RV64IA-NEXT:    xor a6, a5, a1
 ; RV64IA-NEXT:    and a6, a6, a4
 ; RV64IA-NEXT:    xor a6, a5, a6
-; RV64IA-NEXT:  .LBB43_3: # in Loop: Header=BB43_1 Depth=1
+; RV64IA-NEXT:  .LBB48_3: # in Loop: Header=BB48_1 Depth=1
 ; RV64IA-NEXT:    sc.w.rl a6, a6, (a2)
-; RV64IA-NEXT:    bnez a6, .LBB43_1
+; RV64IA-NEXT:    bnez a6, .LBB48_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a5, a0
 ; RV64IA-NEXT:    ret
@@ -3329,9 +3642,9 @@ define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    slli a0, a1, 24
 ; RV32I-NEXT:    srai s2, a0, 24
-; RV32I-NEXT:    j .LBB44_2
-; RV32I-NEXT:  .LBB44_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB44_2 Depth=1
+; RV32I-NEXT:    j .LBB49_2
+; RV32I-NEXT:  .LBB49_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB49_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
 ; RV32I-NEXT:    li a3, 5
@@ -3339,18 +3652,18 @@ define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lbu a3, 15(sp)
-; RV32I-NEXT:    bnez a0, .LBB44_4
-; RV32I-NEXT:  .LBB44_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB49_4
+; RV32I-NEXT:  .LBB49_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    slli a0, a3, 24
 ; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bge s2, a0, .LBB44_1
+; RV32I-NEXT:    bge s2, a0, .LBB49_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB44_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB49_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB44_1
-; RV32I-NEXT:  .LBB44_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB49_1
+; RV32I-NEXT:  .LBB49_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -3370,20 +3683,20 @@ define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    srai a1, a1, 24
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    xori a3, a3, 24
-; RV32IA-NEXT:  .LBB44_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB49_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aqrl a5, (a2)
 ; RV32IA-NEXT:    and a7, a5, a4
 ; RV32IA-NEXT:    mv a6, a5
 ; RV32IA-NEXT:    sll a7, a7, a3
 ; RV32IA-NEXT:    sra a7, a7, a3
-; RV32IA-NEXT:    bge a1, a7, .LBB44_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB44_1 Depth=1
+; RV32IA-NEXT:    bge a1, a7, .LBB49_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB49_1 Depth=1
 ; RV32IA-NEXT:    xor a6, a5, a1
 ; RV32IA-NEXT:    and a6, a6, a4
 ; RV32IA-NEXT:    xor a6, a5, a6
-; RV32IA-NEXT:  .LBB44_3: # in Loop: Header=BB44_1 Depth=1
+; RV32IA-NEXT:  .LBB49_3: # in Loop: Header=BB49_1 Depth=1
 ; RV32IA-NEXT:    sc.w.rl a6, a6, (a2)
-; RV32IA-NEXT:    bnez a6, .LBB44_1
+; RV32IA-NEXT:    bnez a6, .LBB49_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a5, a0
 ; RV32IA-NEXT:    ret
@@ -3400,9 +3713,9 @@ define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    slli a0, a1, 56
 ; RV64I-NEXT:    srai s2, a0, 56
-; RV64I-NEXT:    j .LBB44_2
-; RV64I-NEXT:  .LBB44_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB44_2 Depth=1
+; RV64I-NEXT:    j .LBB49_2
+; RV64I-NEXT:  .LBB49_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB49_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
 ; RV64I-NEXT:    li a3, 5
@@ -3410,18 +3723,18 @@ define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lbu a3, 15(sp)
-; RV64I-NEXT:    bnez a0, .LBB44_4
-; RV64I-NEXT:  .LBB44_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB49_4
+; RV64I-NEXT:  .LBB49_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    slli a0, a3, 56
 ; RV64I-NEXT:    srai a0, a0, 56
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bge s2, a0, .LBB44_1
+; RV64I-NEXT:    bge s2, a0, .LBB49_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB44_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB49_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB44_1
-; RV64I-NEXT:  .LBB44_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB49_1
+; RV64I-NEXT:  .LBB49_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -3441,20 +3754,20 @@ define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    srai a1, a1, 56
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    xori a3, a3, 56
-; RV64IA-NEXT:  .LBB44_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB49_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aqrl a5, (a2)
 ; RV64IA-NEXT:    and a7, a5, a4
 ; RV64IA-NEXT:    mv a6, a5
 ; RV64IA-NEXT:    sll a7, a7, a3
 ; RV64IA-NEXT:    sra a7, a7, a3
-; RV64IA-NEXT:    bge a1, a7, .LBB44_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB44_1 Depth=1
+; RV64IA-NEXT:    bge a1, a7, .LBB49_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB49_1 Depth=1
 ; RV64IA-NEXT:    xor a6, a5, a1
 ; RV64IA-NEXT:    and a6, a6, a4
 ; RV64IA-NEXT:    xor a6, a5, a6
-; RV64IA-NEXT:  .LBB44_3: # in Loop: Header=BB44_1 Depth=1
+; RV64IA-NEXT:  .LBB49_3: # in Loop: Header=BB49_1 Depth=1
 ; RV64IA-NEXT:    sc.w.rl a6, a6, (a2)
-; RV64IA-NEXT:    bnez a6, .LBB44_1
+; RV64IA-NEXT:    bnez a6, .LBB49_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a5, a0
 ; RV64IA-NEXT:    ret
@@ -3474,9 +3787,9 @@ define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    lbu a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    andi s2, a1, 255
-; RV32I-NEXT:    j .LBB45_2
-; RV32I-NEXT:  .LBB45_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB45_2 Depth=1
+; RV32I-NEXT:    j .LBB50_2
+; RV32I-NEXT:  .LBB50_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB50_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
 ; RV32I-NEXT:    mv a0, s0
@@ -3484,17 +3797,17 @@ define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lbu a3, 15(sp)
-; RV32I-NEXT:    bnez a0, .LBB45_4
-; RV32I-NEXT:  .LBB45_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB50_4
+; RV32I-NEXT:  .LBB50_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    andi a0, a3, 255
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bltu s2, a0, .LBB45_1
+; RV32I-NEXT:    bltu s2, a0, .LBB50_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB45_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB50_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB45_1
-; RV32I-NEXT:  .LBB45_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB50_1
+; RV32I-NEXT:  .LBB50_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -3511,18 +3824,18 @@ define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB45_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB50_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a4, (a2)
 ; RV32IA-NEXT:    and a6, a4, a3
 ; RV32IA-NEXT:    mv a5, a4
-; RV32IA-NEXT:    bgeu a6, a1, .LBB45_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB45_1 Depth=1
+; RV32IA-NEXT:    bgeu a6, a1, .LBB50_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB50_1 Depth=1
 ; RV32IA-NEXT:    xor a5, a4, a1
 ; RV32IA-NEXT:    and a5, a5, a3
 ; RV32IA-NEXT:    xor a5, a4, a5
-; RV32IA-NEXT:  .LBB45_3: # in Loop: Header=BB45_1 Depth=1
+; RV32IA-NEXT:  .LBB50_3: # in Loop: Header=BB50_1 Depth=1
 ; RV32IA-NEXT:    sc.w a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB45_1
+; RV32IA-NEXT:    bnez a5, .LBB50_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a0
 ; RV32IA-NEXT:    ret
@@ -3538,9 +3851,9 @@ define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    andi s2, a1, 255
-; RV64I-NEXT:    j .LBB45_2
-; RV64I-NEXT:  .LBB45_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB45_2 Depth=1
+; RV64I-NEXT:    j .LBB50_2
+; RV64I-NEXT:  .LBB50_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB50_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
 ; RV64I-NEXT:    mv a0, s0
@@ -3548,17 +3861,17 @@ define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lbu a3, 15(sp)
-; RV64I-NEXT:    bnez a0, .LBB45_4
-; RV64I-NEXT:  .LBB45_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB50_4
+; RV64I-NEXT:  .LBB50_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    andi a0, a3, 255
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bltu s2, a0, .LBB45_1
+; RV64I-NEXT:    bltu s2, a0, .LBB50_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB45_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB50_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB45_1
-; RV64I-NEXT:  .LBB45_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB50_1
+; RV64I-NEXT:  .LBB50_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -3575,18 +3888,18 @@ define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB45_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB50_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a4, (a2)
 ; RV64IA-NEXT:    and a6, a4, a3
 ; RV64IA-NEXT:    mv a5, a4
-; RV64IA-NEXT:    bgeu a6, a1, .LBB45_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB45_1 Depth=1
+; RV64IA-NEXT:    bgeu a6, a1, .LBB50_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB50_1 Depth=1
 ; RV64IA-NEXT:    xor a5, a4, a1
 ; RV64IA-NEXT:    and a5, a5, a3
 ; RV64IA-NEXT:    xor a5, a4, a5
-; RV64IA-NEXT:  .LBB45_3: # in Loop: Header=BB45_1 Depth=1
+; RV64IA-NEXT:  .LBB50_3: # in Loop: Header=BB50_1 Depth=1
 ; RV64IA-NEXT:    sc.w a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB45_1
+; RV64IA-NEXT:    bnez a5, .LBB50_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a4, a0
 ; RV64IA-NEXT:    ret
@@ -3606,9 +3919,9 @@ define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    lbu a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    andi s2, a1, 255
-; RV32I-NEXT:    j .LBB46_2
-; RV32I-NEXT:  .LBB46_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB46_2 Depth=1
+; RV32I-NEXT:    j .LBB51_2
+; RV32I-NEXT:  .LBB51_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB51_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
 ; RV32I-NEXT:    li a3, 2
@@ -3616,17 +3929,17 @@ define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lbu a3, 15(sp)
-; RV32I-NEXT:    bnez a0, .LBB46_4
-; RV32I-NEXT:  .LBB46_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB51_4
+; RV32I-NEXT:  .LBB51_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    andi a0, a3, 255
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bltu s2, a0, .LBB46_1
+; RV32I-NEXT:    bltu s2, a0, .LBB51_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB46_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB51_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB46_1
-; RV32I-NEXT:  .LBB46_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB51_1
+; RV32I-NEXT:  .LBB51_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -3643,18 +3956,18 @@ define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB46_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB51_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a4, (a2)
 ; RV32IA-NEXT:    and a6, a4, a3
 ; RV32IA-NEXT:    mv a5, a4
-; RV32IA-NEXT:    bgeu a6, a1, .LBB46_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB46_1 Depth=1
+; RV32IA-NEXT:    bgeu a6, a1, .LBB51_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB51_1 Depth=1
 ; RV32IA-NEXT:    xor a5, a4, a1
 ; RV32IA-NEXT:    and a5, a5, a3
 ; RV32IA-NEXT:    xor a5, a4, a5
-; RV32IA-NEXT:  .LBB46_3: # in Loop: Header=BB46_1 Depth=1
+; RV32IA-NEXT:  .LBB51_3: # in Loop: Header=BB51_1 Depth=1
 ; RV32IA-NEXT:    sc.w a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB46_1
+; RV32IA-NEXT:    bnez a5, .LBB51_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a0
 ; RV32IA-NEXT:    ret
@@ -3670,9 +3983,9 @@ define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    andi s2, a1, 255
-; RV64I-NEXT:    j .LBB46_2
-; RV64I-NEXT:  .LBB46_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB46_2 Depth=1
+; RV64I-NEXT:    j .LBB51_2
+; RV64I-NEXT:  .LBB51_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB51_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
 ; RV64I-NEXT:    li a3, 2
@@ -3680,17 +3993,17 @@ define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lbu a3, 15(sp)
-; RV64I-NEXT:    bnez a0, .LBB46_4
-; RV64I-NEXT:  .LBB46_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB51_4
+; RV64I-NEXT:  .LBB51_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    andi a0, a3, 255
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bltu s2, a0, .LBB46_1
+; RV64I-NEXT:    bltu s2, a0, .LBB51_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB46_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB51_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB46_1
-; RV64I-NEXT:  .LBB46_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB51_1
+; RV64I-NEXT:  .LBB51_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -3707,18 +4020,18 @@ define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB46_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB51_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a4, (a2)
 ; RV64IA-NEXT:    and a6, a4, a3
 ; RV64IA-NEXT:    mv a5, a4
-; RV64IA-NEXT:    bgeu a6, a1, .LBB46_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB46_1 Depth=1
+; RV64IA-NEXT:    bgeu a6, a1, .LBB51_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB51_1 Depth=1
 ; RV64IA-NEXT:    xor a5, a4, a1
 ; RV64IA-NEXT:    and a5, a5, a3
 ; RV64IA-NEXT:    xor a5, a4, a5
-; RV64IA-NEXT:  .LBB46_3: # in Loop: Header=BB46_1 Depth=1
+; RV64IA-NEXT:  .LBB51_3: # in Loop: Header=BB51_1 Depth=1
 ; RV64IA-NEXT:    sc.w a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB46_1
+; RV64IA-NEXT:    bnez a5, .LBB51_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a4, a0
 ; RV64IA-NEXT:    ret
@@ -3738,9 +4051,9 @@ define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    lbu a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    andi s2, a1, 255
-; RV32I-NEXT:    j .LBB47_2
-; RV32I-NEXT:  .LBB47_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB47_2 Depth=1
+; RV32I-NEXT:    j .LBB52_2
+; RV32I-NEXT:  .LBB52_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB52_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
 ; RV32I-NEXT:    li a3, 3
@@ -3748,17 +4061,17 @@ define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lbu a3, 15(sp)
-; RV32I-NEXT:    bnez a0, .LBB47_4
-; RV32I-NEXT:  .LBB47_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB52_4
+; RV32I-NEXT:  .LBB52_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    andi a0, a3, 255
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bltu s2, a0, .LBB47_1
+; RV32I-NEXT:    bltu s2, a0, .LBB52_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB47_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB52_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB47_1
-; RV32I-NEXT:  .LBB47_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB52_1
+; RV32I-NEXT:  .LBB52_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -3775,18 +4088,18 @@ define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB47_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB52_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a4, (a2)
 ; RV32IA-NEXT:    and a6, a4, a3
 ; RV32IA-NEXT:    mv a5, a4
-; RV32IA-NEXT:    bgeu a6, a1, .LBB47_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB47_1 Depth=1
+; RV32IA-NEXT:    bgeu a6, a1, .LBB52_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB52_1 Depth=1
 ; RV32IA-NEXT:    xor a5, a4, a1
 ; RV32IA-NEXT:    and a5, a5, a3
 ; RV32IA-NEXT:    xor a5, a4, a5
-; RV32IA-NEXT:  .LBB47_3: # in Loop: Header=BB47_1 Depth=1
+; RV32IA-NEXT:  .LBB52_3: # in Loop: Header=BB52_1 Depth=1
 ; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB47_1
+; RV32IA-NEXT:    bnez a5, .LBB52_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a0
 ; RV32IA-NEXT:    ret
@@ -3802,9 +4115,9 @@ define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    andi s2, a1, 255
-; RV64I-NEXT:    j .LBB47_2
-; RV64I-NEXT:  .LBB47_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB47_2 Depth=1
+; RV64I-NEXT:    j .LBB52_2
+; RV64I-NEXT:  .LBB52_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB52_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
 ; RV64I-NEXT:    li a3, 3
@@ -3812,17 +4125,17 @@ define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lbu a3, 15(sp)
-; RV64I-NEXT:    bnez a0, .LBB47_4
-; RV64I-NEXT:  .LBB47_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB52_4
+; RV64I-NEXT:  .LBB52_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    andi a0, a3, 255
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bltu s2, a0, .LBB47_1
+; RV64I-NEXT:    bltu s2, a0, .LBB52_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB47_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB52_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB47_1
-; RV64I-NEXT:  .LBB47_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB52_1
+; RV64I-NEXT:  .LBB52_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -3839,18 +4152,18 @@ define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB47_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB52_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a4, (a2)
 ; RV64IA-NEXT:    and a6, a4, a3
 ; RV64IA-NEXT:    mv a5, a4
-; RV64IA-NEXT:    bgeu a6, a1, .LBB47_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB47_1 Depth=1
+; RV64IA-NEXT:    bgeu a6, a1, .LBB52_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB52_1 Depth=1
 ; RV64IA-NEXT:    xor a5, a4, a1
 ; RV64IA-NEXT:    and a5, a5, a3
 ; RV64IA-NEXT:    xor a5, a4, a5
-; RV64IA-NEXT:  .LBB47_3: # in Loop: Header=BB47_1 Depth=1
+; RV64IA-NEXT:  .LBB52_3: # in Loop: Header=BB52_1 Depth=1
 ; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB47_1
+; RV64IA-NEXT:    bnez a5, .LBB52_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a4, a0
 ; RV64IA-NEXT:    ret
@@ -3870,9 +4183,9 @@ define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    lbu a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    andi s2, a1, 255
-; RV32I-NEXT:    j .LBB48_2
-; RV32I-NEXT:  .LBB48_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB48_2 Depth=1
+; RV32I-NEXT:    j .LBB53_2
+; RV32I-NEXT:  .LBB53_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB53_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
 ; RV32I-NEXT:    li a3, 4
@@ -3880,17 +4193,17 @@ define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lbu a3, 15(sp)
-; RV32I-NEXT:    bnez a0, .LBB48_4
-; RV32I-NEXT:  .LBB48_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB53_4
+; RV32I-NEXT:  .LBB53_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    andi a0, a3, 255
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bltu s2, a0, .LBB48_1
+; RV32I-NEXT:    bltu s2, a0, .LBB53_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB48_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB53_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB48_1
-; RV32I-NEXT:  .LBB48_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB53_1
+; RV32I-NEXT:  .LBB53_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -3907,18 +4220,18 @@ define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB48_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB53_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a4, (a2)
 ; RV32IA-NEXT:    and a6, a4, a3
 ; RV32IA-NEXT:    mv a5, a4
-; RV32IA-NEXT:    bgeu a6, a1, .LBB48_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB48_1 Depth=1
+; RV32IA-NEXT:    bgeu a6, a1, .LBB53_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB53_1 Depth=1
 ; RV32IA-NEXT:    xor a5, a4, a1
 ; RV32IA-NEXT:    and a5, a5, a3
 ; RV32IA-NEXT:    xor a5, a4, a5
-; RV32IA-NEXT:  .LBB48_3: # in Loop: Header=BB48_1 Depth=1
+; RV32IA-NEXT:  .LBB53_3: # in Loop: Header=BB53_1 Depth=1
 ; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB48_1
+; RV32IA-NEXT:    bnez a5, .LBB53_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a0
 ; RV32IA-NEXT:    ret
@@ -3934,9 +4247,9 @@ define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    andi s2, a1, 255
-; RV64I-NEXT:    j .LBB48_2
-; RV64I-NEXT:  .LBB48_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB48_2 Depth=1
+; RV64I-NEXT:    j .LBB53_2
+; RV64I-NEXT:  .LBB53_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB53_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
 ; RV64I-NEXT:    li a3, 4
@@ -3944,17 +4257,17 @@ define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lbu a3, 15(sp)
-; RV64I-NEXT:    bnez a0, .LBB48_4
-; RV64I-NEXT:  .LBB48_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB53_4
+; RV64I-NEXT:  .LBB53_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    andi a0, a3, 255
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bltu s2, a0, .LBB48_1
+; RV64I-NEXT:    bltu s2, a0, .LBB53_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB48_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB53_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB48_1
-; RV64I-NEXT:  .LBB48_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB53_1
+; RV64I-NEXT:  .LBB53_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -3971,18 +4284,18 @@ define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB48_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB53_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a4, (a2)
 ; RV64IA-NEXT:    and a6, a4, a3
 ; RV64IA-NEXT:    mv a5, a4
-; RV64IA-NEXT:    bgeu a6, a1, .LBB48_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB48_1 Depth=1
+; RV64IA-NEXT:    bgeu a6, a1, .LBB53_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB53_1 Depth=1
 ; RV64IA-NEXT:    xor a5, a4, a1
 ; RV64IA-NEXT:    and a5, a5, a3
 ; RV64IA-NEXT:    xor a5, a4, a5
-; RV64IA-NEXT:  .LBB48_3: # in Loop: Header=BB48_1 Depth=1
+; RV64IA-NEXT:  .LBB53_3: # in Loop: Header=BB53_1 Depth=1
 ; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB48_1
+; RV64IA-NEXT:    bnez a5, .LBB53_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a4, a0
 ; RV64IA-NEXT:    ret
@@ -4002,27 +4315,27 @@ define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    lbu a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    andi s2, a1, 255
-; RV32I-NEXT:    j .LBB49_2
-; RV32I-NEXT:  .LBB49_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB49_2 Depth=1
-; RV32I-NEXT:    sb a3, 15(sp)
+; RV32I-NEXT:    j .LBB54_2
+; RV32I-NEXT:  .LBB54_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB54_2 Depth=1
+; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
 ; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lbu a3, 15(sp)
-; RV32I-NEXT:    bnez a0, .LBB49_4
-; RV32I-NEXT:  .LBB49_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB54_4
+; RV32I-NEXT:  .LBB54_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    andi a0, a3, 255
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bltu s2, a0, .LBB49_1
+; RV32I-NEXT:    bltu s2, a0, .LBB54_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB49_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB54_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB49_1
-; RV32I-NEXT:  .LBB49_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB54_1
+; RV32I-NEXT:  .LBB54_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -4039,18 +4352,18 @@ define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB49_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB54_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aqrl a4, (a2)
 ; RV32IA-NEXT:    and a6, a4, a3
 ; RV32IA-NEXT:    mv a5, a4
-; RV32IA-NEXT:    bgeu a6, a1, .LBB49_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB49_1 Depth=1
+; RV32IA-NEXT:    bgeu a6, a1, .LBB54_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB54_1 Depth=1
 ; RV32IA-NEXT:    xor a5, a4, a1
 ; RV32IA-NEXT:    and a5, a5, a3
 ; RV32IA-NEXT:    xor a5, a4, a5
-; RV32IA-NEXT:  .LBB49_3: # in Loop: Header=BB49_1 Depth=1
+; RV32IA-NEXT:  .LBB54_3: # in Loop: Header=BB54_1 Depth=1
 ; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB49_1
+; RV32IA-NEXT:    bnez a5, .LBB54_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a0
 ; RV32IA-NEXT:    ret
@@ -4066,9 +4379,9 @@ define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    andi s2, a1, 255
-; RV64I-NEXT:    j .LBB49_2
-; RV64I-NEXT:  .LBB49_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB49_2 Depth=1
+; RV64I-NEXT:    j .LBB54_2
+; RV64I-NEXT:  .LBB54_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB54_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
 ; RV64I-NEXT:    li a3, 5
@@ -4076,17 +4389,17 @@ define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lbu a3, 15(sp)
-; RV64I-NEXT:    bnez a0, .LBB49_4
-; RV64I-NEXT:  .LBB49_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB54_4
+; RV64I-NEXT:  .LBB54_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    andi a0, a3, 255
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bltu s2, a0, .LBB49_1
+; RV64I-NEXT:    bltu s2, a0, .LBB54_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB49_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB54_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB49_1
-; RV64I-NEXT:  .LBB49_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB54_1
+; RV64I-NEXT:  .LBB54_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -4103,18 +4416,18 @@ define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB49_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB54_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aqrl a4, (a2)
 ; RV64IA-NEXT:    and a6, a4, a3
 ; RV64IA-NEXT:    mv a5, a4
-; RV64IA-NEXT:    bgeu a6, a1, .LBB49_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB49_1 Depth=1
+; RV64IA-NEXT:    bgeu a6, a1, .LBB54_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB54_1 Depth=1
 ; RV64IA-NEXT:    xor a5, a4, a1
 ; RV64IA-NEXT:    and a5, a5, a3
 ; RV64IA-NEXT:    xor a5, a4, a5
-; RV64IA-NEXT:  .LBB49_3: # in Loop: Header=BB49_1 Depth=1
+; RV64IA-NEXT:  .LBB54_3: # in Loop: Header=BB54_1 Depth=1
 ; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB49_1
+; RV64IA-NEXT:    bnez a5, .LBB54_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a4, a0
 ; RV64IA-NEXT:    ret
@@ -4134,9 +4447,9 @@ define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    lbu a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    andi s2, a1, 255
-; RV32I-NEXT:    j .LBB50_2
-; RV32I-NEXT:  .LBB50_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB50_2 Depth=1
+; RV32I-NEXT:    j .LBB55_2
+; RV32I-NEXT:  .LBB55_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB55_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
 ; RV32I-NEXT:    mv a0, s0
@@ -4144,17 +4457,17 @@ define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lbu a3, 15(sp)
-; RV32I-NEXT:    bnez a0, .LBB50_4
-; RV32I-NEXT:  .LBB50_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB55_4
+; RV32I-NEXT:  .LBB55_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    andi a0, a3, 255
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bgeu s2, a0, .LBB50_1
+; RV32I-NEXT:    bgeu s2, a0, .LBB55_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB50_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB55_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB50_1
-; RV32I-NEXT:  .LBB50_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB55_1
+; RV32I-NEXT:  .LBB55_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -4171,18 +4484,18 @@ define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB50_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB55_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a4, (a2)
 ; RV32IA-NEXT:    and a6, a4, a3
 ; RV32IA-NEXT:    mv a5, a4
-; RV32IA-NEXT:    bgeu a1, a6, .LBB50_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB50_1 Depth=1
+; RV32IA-NEXT:    bgeu a1, a6, .LBB55_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB55_1 Depth=1
 ; RV32IA-NEXT:    xor a5, a4, a1
 ; RV32IA-NEXT:    and a5, a5, a3
 ; RV32IA-NEXT:    xor a5, a4, a5
-; RV32IA-NEXT:  .LBB50_3: # in Loop: Header=BB50_1 Depth=1
+; RV32IA-NEXT:  .LBB55_3: # in Loop: Header=BB55_1 Depth=1
 ; RV32IA-NEXT:    sc.w a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB50_1
+; RV32IA-NEXT:    bnez a5, .LBB55_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a0
 ; RV32IA-NEXT:    ret
@@ -4198,9 +4511,9 @@ define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    andi s2, a1, 255
-; RV64I-NEXT:    j .LBB50_2
-; RV64I-NEXT:  .LBB50_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB50_2 Depth=1
+; RV64I-NEXT:    j .LBB55_2
+; RV64I-NEXT:  .LBB55_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB55_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
 ; RV64I-NEXT:    mv a0, s0
@@ -4208,17 +4521,17 @@ define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lbu a3, 15(sp)
-; RV64I-NEXT:    bnez a0, .LBB50_4
-; RV64I-NEXT:  .LBB50_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB55_4
+; RV64I-NEXT:  .LBB55_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    andi a0, a3, 255
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bgeu s2, a0, .LBB50_1
+; RV64I-NEXT:    bgeu s2, a0, .LBB55_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB50_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB55_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB50_1
-; RV64I-NEXT:  .LBB50_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB55_1
+; RV64I-NEXT:  .LBB55_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -4235,18 +4548,18 @@ define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB50_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB55_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a4, (a2)
 ; RV64IA-NEXT:    and a6, a4, a3
 ; RV64IA-NEXT:    mv a5, a4
-; RV64IA-NEXT:    bgeu a1, a6, .LBB50_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB50_1 Depth=1
+; RV64IA-NEXT:    bgeu a1, a6, .LBB55_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB55_1 Depth=1
 ; RV64IA-NEXT:    xor a5, a4, a1
 ; RV64IA-NEXT:    and a5, a5, a3
 ; RV64IA-NEXT:    xor a5, a4, a5
-; RV64IA-NEXT:  .LBB50_3: # in Loop: Header=BB50_1 Depth=1
+; RV64IA-NEXT:  .LBB55_3: # in Loop: Header=BB55_1 Depth=1
 ; RV64IA-NEXT:    sc.w a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB50_1
+; RV64IA-NEXT:    bnez a5, .LBB55_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a4, a0
 ; RV64IA-NEXT:    ret
@@ -4266,9 +4579,9 @@ define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    lbu a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    andi s2, a1, 255
-; RV32I-NEXT:    j .LBB51_2
-; RV32I-NEXT:  .LBB51_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB51_2 Depth=1
+; RV32I-NEXT:    j .LBB56_2
+; RV32I-NEXT:  .LBB56_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB56_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
 ; RV32I-NEXT:    li a3, 2
@@ -4276,17 +4589,17 @@ define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lbu a3, 15(sp)
-; RV32I-NEXT:    bnez a0, .LBB51_4
-; RV32I-NEXT:  .LBB51_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB56_4
+; RV32I-NEXT:  .LBB56_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    andi a0, a3, 255
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bgeu s2, a0, .LBB51_1
+; RV32I-NEXT:    bgeu s2, a0, .LBB56_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB51_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB56_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB51_1
-; RV32I-NEXT:  .LBB51_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB56_1
+; RV32I-NEXT:  .LBB56_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -4303,18 +4616,18 @@ define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB51_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB56_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a4, (a2)
 ; RV32IA-NEXT:    and a6, a4, a3
 ; RV32IA-NEXT:    mv a5, a4
-; RV32IA-NEXT:    bgeu a1, a6, .LBB51_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB51_1 Depth=1
+; RV32IA-NEXT:    bgeu a1, a6, .LBB56_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB56_1 Depth=1
 ; RV32IA-NEXT:    xor a5, a4, a1
 ; RV32IA-NEXT:    and a5, a5, a3
 ; RV32IA-NEXT:    xor a5, a4, a5
-; RV32IA-NEXT:  .LBB51_3: # in Loop: Header=BB51_1 Depth=1
+; RV32IA-NEXT:  .LBB56_3: # in Loop: Header=BB56_1 Depth=1
 ; RV32IA-NEXT:    sc.w a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB51_1
+; RV32IA-NEXT:    bnez a5, .LBB56_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a0
 ; RV32IA-NEXT:    ret
@@ -4330,9 +4643,9 @@ define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    andi s2, a1, 255
-; RV64I-NEXT:    j .LBB51_2
-; RV64I-NEXT:  .LBB51_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB51_2 Depth=1
+; RV64I-NEXT:    j .LBB56_2
+; RV64I-NEXT:  .LBB56_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB56_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
 ; RV64I-NEXT:    li a3, 2
@@ -4340,17 +4653,17 @@ define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lbu a3, 15(sp)
-; RV64I-NEXT:    bnez a0, .LBB51_4
-; RV64I-NEXT:  .LBB51_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB56_4
+; RV64I-NEXT:  .LBB56_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    andi a0, a3, 255
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bgeu s2, a0, .LBB51_1
+; RV64I-NEXT:    bgeu s2, a0, .LBB56_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB51_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB56_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB51_1
-; RV64I-NEXT:  .LBB51_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB56_1
+; RV64I-NEXT:  .LBB56_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -4367,18 +4680,18 @@ define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB51_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB56_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a4, (a2)
 ; RV64IA-NEXT:    and a6, a4, a3
 ; RV64IA-NEXT:    mv a5, a4
-; RV64IA-NEXT:    bgeu a1, a6, .LBB51_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB51_1 Depth=1
+; RV64IA-NEXT:    bgeu a1, a6, .LBB56_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB56_1 Depth=1
 ; RV64IA-NEXT:    xor a5, a4, a1
 ; RV64IA-NEXT:    and a5, a5, a3
 ; RV64IA-NEXT:    xor a5, a4, a5
-; RV64IA-NEXT:  .LBB51_3: # in Loop: Header=BB51_1 Depth=1
+; RV64IA-NEXT:  .LBB56_3: # in Loop: Header=BB56_1 Depth=1
 ; RV64IA-NEXT:    sc.w a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB51_1
+; RV64IA-NEXT:    bnez a5, .LBB56_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a4, a0
 ; RV64IA-NEXT:    ret
@@ -4398,9 +4711,9 @@ define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    lbu a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    andi s2, a1, 255
-; RV32I-NEXT:    j .LBB52_2
-; RV32I-NEXT:  .LBB52_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB52_2 Depth=1
+; RV32I-NEXT:    j .LBB57_2
+; RV32I-NEXT:  .LBB57_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB57_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
 ; RV32I-NEXT:    li a3, 3
@@ -4408,17 +4721,17 @@ define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lbu a3, 15(sp)
-; RV32I-NEXT:    bnez a0, .LBB52_4
-; RV32I-NEXT:  .LBB52_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB57_4
+; RV32I-NEXT:  .LBB57_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    andi a0, a3, 255
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bgeu s2, a0, .LBB52_1
+; RV32I-NEXT:    bgeu s2, a0, .LBB57_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB52_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB57_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB52_1
-; RV32I-NEXT:  .LBB52_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB57_1
+; RV32I-NEXT:  .LBB57_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -4435,18 +4748,18 @@ define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB52_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB57_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a4, (a2)
 ; RV32IA-NEXT:    and a6, a4, a3
 ; RV32IA-NEXT:    mv a5, a4
-; RV32IA-NEXT:    bgeu a1, a6, .LBB52_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB52_1 Depth=1
+; RV32IA-NEXT:    bgeu a1, a6, .LBB57_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB57_1 Depth=1
 ; RV32IA-NEXT:    xor a5, a4, a1
 ; RV32IA-NEXT:    and a5, a5, a3
 ; RV32IA-NEXT:    xor a5, a4, a5
-; RV32IA-NEXT:  .LBB52_3: # in Loop: Header=BB52_1 Depth=1
+; RV32IA-NEXT:  .LBB57_3: # in Loop: Header=BB57_1 Depth=1
 ; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB52_1
+; RV32IA-NEXT:    bnez a5, .LBB57_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a0
 ; RV32IA-NEXT:    ret
@@ -4462,9 +4775,9 @@ define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    andi s2, a1, 255
-; RV64I-NEXT:    j .LBB52_2
-; RV64I-NEXT:  .LBB52_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB52_2 Depth=1
+; RV64I-NEXT:    j .LBB57_2
+; RV64I-NEXT:  .LBB57_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB57_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
 ; RV64I-NEXT:    li a3, 3
@@ -4472,17 +4785,17 @@ define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lbu a3, 15(sp)
-; RV64I-NEXT:    bnez a0, .LBB52_4
-; RV64I-NEXT:  .LBB52_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB57_4
+; RV64I-NEXT:  .LBB57_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    andi a0, a3, 255
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bgeu s2, a0, .LBB52_1
+; RV64I-NEXT:    bgeu s2, a0, .LBB57_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB52_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB57_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB52_1
-; RV64I-NEXT:  .LBB52_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB57_1
+; RV64I-NEXT:  .LBB57_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -4499,18 +4812,18 @@ define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB52_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB57_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a4, (a2)
 ; RV64IA-NEXT:    and a6, a4, a3
 ; RV64IA-NEXT:    mv a5, a4
-; RV64IA-NEXT:    bgeu a1, a6, .LBB52_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB52_1 Depth=1
+; RV64IA-NEXT:    bgeu a1, a6, .LBB57_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB57_1 Depth=1
 ; RV64IA-NEXT:    xor a5, a4, a1
 ; RV64IA-NEXT:    and a5, a5, a3
 ; RV64IA-NEXT:    xor a5, a4, a5
-; RV64IA-NEXT:  .LBB52_3: # in Loop: Header=BB52_1 Depth=1
+; RV64IA-NEXT:  .LBB57_3: # in Loop: Header=BB57_1 Depth=1
 ; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB52_1
+; RV64IA-NEXT:    bnez a5, .LBB57_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a4, a0
 ; RV64IA-NEXT:    ret
@@ -4530,9 +4843,9 @@ define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    lbu a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    andi s2, a1, 255
-; RV32I-NEXT:    j .LBB53_2
-; RV32I-NEXT:  .LBB53_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB53_2 Depth=1
+; RV32I-NEXT:    j .LBB58_2
+; RV32I-NEXT:  .LBB58_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB58_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
 ; RV32I-NEXT:    li a3, 4
@@ -4540,17 +4853,17 @@ define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lbu a3, 15(sp)
-; RV32I-NEXT:    bnez a0, .LBB53_4
-; RV32I-NEXT:  .LBB53_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB58_4
+; RV32I-NEXT:  .LBB58_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    andi a0, a3, 255
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bgeu s2, a0, .LBB53_1
+; RV32I-NEXT:    bgeu s2, a0, .LBB58_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB53_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB58_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB53_1
-; RV32I-NEXT:  .LBB53_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB58_1
+; RV32I-NEXT:  .LBB58_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -4567,18 +4880,18 @@ define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB53_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB58_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a4, (a2)
 ; RV32IA-NEXT:    and a6, a4, a3
 ; RV32IA-NEXT:    mv a5, a4
-; RV32IA-NEXT:    bgeu a1, a6, .LBB53_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB53_1 Depth=1
+; RV32IA-NEXT:    bgeu a1, a6, .LBB58_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB58_1 Depth=1
 ; RV32IA-NEXT:    xor a5, a4, a1
 ; RV32IA-NEXT:    and a5, a5, a3
 ; RV32IA-NEXT:    xor a5, a4, a5
-; RV32IA-NEXT:  .LBB53_3: # in Loop: Header=BB53_1 Depth=1
+; RV32IA-NEXT:  .LBB58_3: # in Loop: Header=BB58_1 Depth=1
 ; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB53_1
+; RV32IA-NEXT:    bnez a5, .LBB58_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a0
 ; RV32IA-NEXT:    ret
@@ -4594,9 +4907,9 @@ define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    andi s2, a1, 255
-; RV64I-NEXT:    j .LBB53_2
-; RV64I-NEXT:  .LBB53_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB53_2 Depth=1
+; RV64I-NEXT:    j .LBB58_2
+; RV64I-NEXT:  .LBB58_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB58_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
 ; RV64I-NEXT:    li a3, 4
@@ -4604,17 +4917,17 @@ define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lbu a3, 15(sp)
-; RV64I-NEXT:    bnez a0, .LBB53_4
-; RV64I-NEXT:  .LBB53_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB58_4
+; RV64I-NEXT:  .LBB58_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    andi a0, a3, 255
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bgeu s2, a0, .LBB53_1
+; RV64I-NEXT:    bgeu s2, a0, .LBB58_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB53_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB58_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB53_1
-; RV64I-NEXT:  .LBB53_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB58_1
+; RV64I-NEXT:  .LBB58_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -4631,18 +4944,18 @@ define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB53_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB58_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a4, (a2)
 ; RV64IA-NEXT:    and a6, a4, a3
 ; RV64IA-NEXT:    mv a5, a4
-; RV64IA-NEXT:    bgeu a1, a6, .LBB53_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB53_1 Depth=1
+; RV64IA-NEXT:    bgeu a1, a6, .LBB58_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB58_1 Depth=1
 ; RV64IA-NEXT:    xor a5, a4, a1
 ; RV64IA-NEXT:    and a5, a5, a3
 ; RV64IA-NEXT:    xor a5, a4, a5
-; RV64IA-NEXT:  .LBB53_3: # in Loop: Header=BB53_1 Depth=1
+; RV64IA-NEXT:  .LBB58_3: # in Loop: Header=BB58_1 Depth=1
 ; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB53_1
+; RV64IA-NEXT:    bnez a5, .LBB58_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a4, a0
 ; RV64IA-NEXT:    ret
@@ -4662,9 +4975,9 @@ define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    lbu a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    andi s2, a1, 255
-; RV32I-NEXT:    j .LBB54_2
-; RV32I-NEXT:  .LBB54_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB54_2 Depth=1
+; RV32I-NEXT:    j .LBB59_2
+; RV32I-NEXT:  .LBB59_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB59_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
 ; RV32I-NEXT:    li a3, 5
@@ -4672,17 +4985,17 @@ define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lbu a3, 15(sp)
-; RV32I-NEXT:    bnez a0, .LBB54_4
-; RV32I-NEXT:  .LBB54_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB59_4
+; RV32I-NEXT:  .LBB59_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    andi a0, a3, 255
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bgeu s2, a0, .LBB54_1
+; RV32I-NEXT:    bgeu s2, a0, .LBB59_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB54_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB59_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB54_1
-; RV32I-NEXT:  .LBB54_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB59_1
+; RV32I-NEXT:  .LBB59_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -4699,18 +5012,18 @@ define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB54_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB59_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aqrl a4, (a2)
 ; RV32IA-NEXT:    and a6, a4, a3
 ; RV32IA-NEXT:    mv a5, a4
-; RV32IA-NEXT:    bgeu a1, a6, .LBB54_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB54_1 Depth=1
+; RV32IA-NEXT:    bgeu a1, a6, .LBB59_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB59_1 Depth=1
 ; RV32IA-NEXT:    xor a5, a4, a1
 ; RV32IA-NEXT:    and a5, a5, a3
 ; RV32IA-NEXT:    xor a5, a4, a5
-; RV32IA-NEXT:  .LBB54_3: # in Loop: Header=BB54_1 Depth=1
+; RV32IA-NEXT:  .LBB59_3: # in Loop: Header=BB59_1 Depth=1
 ; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB54_1
+; RV32IA-NEXT:    bnez a5, .LBB59_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a4, a0
 ; RV32IA-NEXT:    ret
@@ -4726,9 +5039,9 @@ define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    andi s2, a1, 255
-; RV64I-NEXT:    j .LBB54_2
-; RV64I-NEXT:  .LBB54_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB54_2 Depth=1
+; RV64I-NEXT:    j .LBB59_2
+; RV64I-NEXT:  .LBB59_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB59_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
 ; RV64I-NEXT:    li a3, 5
@@ -4736,17 +5049,17 @@ define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lbu a3, 15(sp)
-; RV64I-NEXT:    bnez a0, .LBB54_4
-; RV64I-NEXT:  .LBB54_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB59_4
+; RV64I-NEXT:  .LBB59_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    andi a0, a3, 255
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bgeu s2, a0, .LBB54_1
+; RV64I-NEXT:    bgeu s2, a0, .LBB59_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB54_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB59_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB54_1
-; RV64I-NEXT:  .LBB54_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB59_1
+; RV64I-NEXT:  .LBB59_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -4763,352 +5076,675 @@ define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB54_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB59_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aqrl a4, (a2)
 ; RV64IA-NEXT:    and a6, a4, a3
 ; RV64IA-NEXT:    mv a5, a4
-; RV64IA-NEXT:    bgeu a1, a6, .LBB54_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB54_1 Depth=1
+; RV64IA-NEXT:    bgeu a1, a6, .LBB59_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB59_1 Depth=1
 ; RV64IA-NEXT:    xor a5, a4, a1
 ; RV64IA-NEXT:    and a5, a5, a3
 ; RV64IA-NEXT:    xor a5, a4, a5
-; RV64IA-NEXT:  .LBB54_3: # in Loop: Header=BB54_1 Depth=1
+; RV64IA-NEXT:  .LBB59_3: # in Loop: Header=BB59_1 Depth=1
+; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
+; RV64IA-NEXT:    bnez a5, .LBB59_1
+; RV64IA-NEXT:  # %bb.4:
+; RV64IA-NEXT:    srlw a0, a4, a0
+; RV64IA-NEXT:    ret
+  %1 = atomicrmw umin ptr %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i16 @atomicrmw_xchg_i16_monotonic(ptr %a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xchg_i16_monotonic:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    li a2, 0
+; RV32I-NEXT:    call __atomic_exchange_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_xchg_i16_monotonic:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    andi a2, a0, -4
+; RV32IA-NEXT:    slli a0, a0, 3
+; RV32IA-NEXT:    lui a3, 16
+; RV32IA-NEXT:    addi a3, a3, -1
+; RV32IA-NEXT:    sll a4, a3, a0
+; RV32IA-NEXT:    and a1, a1, a3
+; RV32IA-NEXT:    sll a1, a1, a0
+; RV32IA-NEXT:  .LBB60_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:    lr.w a3, (a2)
+; RV32IA-NEXT:    mv a5, a1
+; RV32IA-NEXT:    xor a5, a3, a5
+; RV32IA-NEXT:    and a5, a5, a4
+; RV32IA-NEXT:    xor a5, a3, a5
+; RV32IA-NEXT:    sc.w a5, a5, (a2)
+; RV32IA-NEXT:    bnez a5, .LBB60_1
+; RV32IA-NEXT:  # %bb.2:
+; RV32IA-NEXT:    srl a0, a3, a0
+; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i16_monotonic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    li a2, 0
+; RV64I-NEXT:    call __atomic_exchange_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_i16_monotonic:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    andi a2, a0, -4
+; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    lui a3, 16
+; RV64IA-NEXT:    addiw a3, a3, -1
+; RV64IA-NEXT:    sllw a4, a3, a0
+; RV64IA-NEXT:    and a1, a1, a3
+; RV64IA-NEXT:    sllw a1, a1, a0
+; RV64IA-NEXT:  .LBB60_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:    lr.w a3, (a2)
+; RV64IA-NEXT:    mv a5, a1
+; RV64IA-NEXT:    xor a5, a3, a5
+; RV64IA-NEXT:    and a5, a5, a4
+; RV64IA-NEXT:    xor a5, a3, a5
+; RV64IA-NEXT:    sc.w a5, a5, (a2)
+; RV64IA-NEXT:    bnez a5, .LBB60_1
+; RV64IA-NEXT:  # %bb.2:
+; RV64IA-NEXT:    srlw a0, a3, a0
+; RV64IA-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i16 %b monotonic
+  ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_i16_acquire(ptr %a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xchg_i16_acquire:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    li a2, 2
+; RV32I-NEXT:    call __atomic_exchange_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_xchg_i16_acquire:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    andi a2, a0, -4
+; RV32IA-NEXT:    slli a0, a0, 3
+; RV32IA-NEXT:    lui a3, 16
+; RV32IA-NEXT:    addi a3, a3, -1
+; RV32IA-NEXT:    sll a4, a3, a0
+; RV32IA-NEXT:    and a1, a1, a3
+; RV32IA-NEXT:    sll a1, a1, a0
+; RV32IA-NEXT:  .LBB61_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:    lr.w.aq a3, (a2)
+; RV32IA-NEXT:    mv a5, a1
+; RV32IA-NEXT:    xor a5, a3, a5
+; RV32IA-NEXT:    and a5, a5, a4
+; RV32IA-NEXT:    xor a5, a3, a5
+; RV32IA-NEXT:    sc.w a5, a5, (a2)
+; RV32IA-NEXT:    bnez a5, .LBB61_1
+; RV32IA-NEXT:  # %bb.2:
+; RV32IA-NEXT:    srl a0, a3, a0
+; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i16_acquire:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    li a2, 2
+; RV64I-NEXT:    call __atomic_exchange_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_i16_acquire:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    andi a2, a0, -4
+; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    lui a3, 16
+; RV64IA-NEXT:    addiw a3, a3, -1
+; RV64IA-NEXT:    sllw a4, a3, a0
+; RV64IA-NEXT:    and a1, a1, a3
+; RV64IA-NEXT:    sllw a1, a1, a0
+; RV64IA-NEXT:  .LBB61_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:    lr.w.aq a3, (a2)
+; RV64IA-NEXT:    mv a5, a1
+; RV64IA-NEXT:    xor a5, a3, a5
+; RV64IA-NEXT:    and a5, a5, a4
+; RV64IA-NEXT:    xor a5, a3, a5
+; RV64IA-NEXT:    sc.w a5, a5, (a2)
+; RV64IA-NEXT:    bnez a5, .LBB61_1
+; RV64IA-NEXT:  # %bb.2:
+; RV64IA-NEXT:    srlw a0, a3, a0
+; RV64IA-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i16 %b acquire
+  ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_i16_release(ptr %a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xchg_i16_release:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    li a2, 3
+; RV32I-NEXT:    call __atomic_exchange_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_xchg_i16_release:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    andi a2, a0, -4
+; RV32IA-NEXT:    slli a0, a0, 3
+; RV32IA-NEXT:    lui a3, 16
+; RV32IA-NEXT:    addi a3, a3, -1
+; RV32IA-NEXT:    sll a4, a3, a0
+; RV32IA-NEXT:    and a1, a1, a3
+; RV32IA-NEXT:    sll a1, a1, a0
+; RV32IA-NEXT:  .LBB62_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:    lr.w a3, (a2)
+; RV32IA-NEXT:    mv a5, a1
+; RV32IA-NEXT:    xor a5, a3, a5
+; RV32IA-NEXT:    and a5, a5, a4
+; RV32IA-NEXT:    xor a5, a3, a5
+; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
+; RV32IA-NEXT:    bnez a5, .LBB62_1
+; RV32IA-NEXT:  # %bb.2:
+; RV32IA-NEXT:    srl a0, a3, a0
+; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i16_release:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    li a2, 3
+; RV64I-NEXT:    call __atomic_exchange_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_i16_release:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    andi a2, a0, -4
+; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    lui a3, 16
+; RV64IA-NEXT:    addiw a3, a3, -1
+; RV64IA-NEXT:    sllw a4, a3, a0
+; RV64IA-NEXT:    and a1, a1, a3
+; RV64IA-NEXT:    sllw a1, a1, a0
+; RV64IA-NEXT:  .LBB62_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:    lr.w a3, (a2)
+; RV64IA-NEXT:    mv a5, a1
+; RV64IA-NEXT:    xor a5, a3, a5
+; RV64IA-NEXT:    and a5, a5, a4
+; RV64IA-NEXT:    xor a5, a3, a5
+; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
+; RV64IA-NEXT:    bnez a5, .LBB62_1
+; RV64IA-NEXT:  # %bb.2:
+; RV64IA-NEXT:    srlw a0, a3, a0
+; RV64IA-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i16 %b release
+  ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xchg_i16_acq_rel:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    li a2, 4
+; RV32I-NEXT:    call __atomic_exchange_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_xchg_i16_acq_rel:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    andi a2, a0, -4
+; RV32IA-NEXT:    slli a0, a0, 3
+; RV32IA-NEXT:    lui a3, 16
+; RV32IA-NEXT:    addi a3, a3, -1
+; RV32IA-NEXT:    sll a4, a3, a0
+; RV32IA-NEXT:    and a1, a1, a3
+; RV32IA-NEXT:    sll a1, a1, a0
+; RV32IA-NEXT:  .LBB63_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:    lr.w.aq a3, (a2)
+; RV32IA-NEXT:    mv a5, a1
+; RV32IA-NEXT:    xor a5, a3, a5
+; RV32IA-NEXT:    and a5, a5, a4
+; RV32IA-NEXT:    xor a5, a3, a5
+; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
+; RV32IA-NEXT:    bnez a5, .LBB63_1
+; RV32IA-NEXT:  # %bb.2:
+; RV32IA-NEXT:    srl a0, a3, a0
+; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i16_acq_rel:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    li a2, 4
+; RV64I-NEXT:    call __atomic_exchange_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_i16_acq_rel:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    andi a2, a0, -4
+; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    lui a3, 16
+; RV64IA-NEXT:    addiw a3, a3, -1
+; RV64IA-NEXT:    sllw a4, a3, a0
+; RV64IA-NEXT:    and a1, a1, a3
+; RV64IA-NEXT:    sllw a1, a1, a0
+; RV64IA-NEXT:  .LBB63_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:    lr.w.aq a3, (a2)
+; RV64IA-NEXT:    mv a5, a1
+; RV64IA-NEXT:    xor a5, a3, a5
+; RV64IA-NEXT:    and a5, a5, a4
+; RV64IA-NEXT:    xor a5, a3, a5
+; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
+; RV64IA-NEXT:    bnez a5, .LBB63_1
+; RV64IA-NEXT:  # %bb.2:
+; RV64IA-NEXT:    srlw a0, a3, a0
+; RV64IA-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i16 %b acq_rel
+  ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; RV32I-LABEL: atomicrmw_xchg_i16_seq_cst:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    li a2, 5
+; RV32I-NEXT:    call __atomic_exchange_2@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV32IA-LABEL: atomicrmw_xchg_i16_seq_cst:
+; RV32IA:       # %bb.0:
+; RV32IA-NEXT:    andi a2, a0, -4
+; RV32IA-NEXT:    slli a0, a0, 3
+; RV32IA-NEXT:    lui a3, 16
+; RV32IA-NEXT:    addi a3, a3, -1
+; RV32IA-NEXT:    sll a4, a3, a0
+; RV32IA-NEXT:    and a1, a1, a3
+; RV32IA-NEXT:    sll a1, a1, a0
+; RV32IA-NEXT:  .LBB64_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:    lr.w.aqrl a3, (a2)
+; RV32IA-NEXT:    mv a5, a1
+; RV32IA-NEXT:    xor a5, a3, a5
+; RV32IA-NEXT:    and a5, a5, a4
+; RV32IA-NEXT:    xor a5, a3, a5
+; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
+; RV32IA-NEXT:    bnez a5, .LBB64_1
+; RV32IA-NEXT:  # %bb.2:
+; RV32IA-NEXT:    srl a0, a3, a0
+; RV32IA-NEXT:    ret
+;
+; RV64I-LABEL: atomicrmw_xchg_i16_seq_cst:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    li a2, 5
+; RV64I-NEXT:    call __atomic_exchange_2@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_i16_seq_cst:
+; RV64IA:       # %bb.0:
+; RV64IA-NEXT:    andi a2, a0, -4
+; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    lui a3, 16
+; RV64IA-NEXT:    addiw a3, a3, -1
+; RV64IA-NEXT:    sllw a4, a3, a0
+; RV64IA-NEXT:    and a1, a1, a3
+; RV64IA-NEXT:    sllw a1, a1, a0
+; RV64IA-NEXT:  .LBB64_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:    lr.w.aqrl a3, (a2)
+; RV64IA-NEXT:    mv a5, a1
+; RV64IA-NEXT:    xor a5, a3, a5
+; RV64IA-NEXT:    and a5, a5, a4
+; RV64IA-NEXT:    xor a5, a3, a5
 ; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB54_1
-; RV64IA-NEXT:  # %bb.4:
-; RV64IA-NEXT:    srlw a0, a4, a0
+; RV64IA-NEXT:    bnez a5, .LBB64_1
+; RV64IA-NEXT:  # %bb.2:
+; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
-  %1 = atomicrmw umin ptr %a, i8 %b seq_cst
-  ret i8 %1
+  %1 = atomicrmw xchg ptr %a, i16 %b seq_cst
+  ret i16 %1
 }
 
-define i16 @atomicrmw_xchg_i16_monotonic(ptr %a, i16 %b) nounwind {
-; RV32I-LABEL: atomicrmw_xchg_i16_monotonic:
+; TODO: The following 'atomicrmw xchg a, 0' cases can be lowered to an amoand
+; with an appropriate mask.
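+; A minimal illustrative sketch (an assumption about the forthcoming lowering,
+; not current compiler output): for the i16 monotonic case a single AMO could
+; replace the whole LR/SC loop, e.g.
+;   andi a1, a0, -4        ; align the pointer down to the containing word
+;   slli a0, a0, 3         ; bit offset of the halfword within that word
+;   lui a2, 16
+;   addi a2, a2, -1        ; a2 = 0xffff
+;   sll a2, a2, a0         ; halfword mask shifted into place
+;   not a2, a2             ; keep the other bytes, clear ours
+;   amoand.w a2, a2, (a1)  ; atomic and; old word value returned in a2
+;   srl a0, a2, a0         ; extract the old halfword as the result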
+
+define i16 @atomicrmw_xchg_0_i16_monotonic(ptr %a) nounwind {
+; RV32I-LABEL: atomicrmw_xchg_0_i16_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_exchange_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
-; RV32IA-LABEL: atomicrmw_xchg_i16_monotonic:
+; RV32IA-LABEL: atomicrmw_xchg_0_i16_monotonic:
 ; RV32IA:       # %bb.0:
-; RV32IA-NEXT:    andi a2, a0, -4
+; RV32IA-NEXT:    andi a1, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    lui a3, 16
-; RV32IA-NEXT:    addi a3, a3, -1
-; RV32IA-NEXT:    sll a4, a3, a0
-; RV32IA-NEXT:    and a1, a1, a3
-; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB55_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    lr.w a3, (a2)
-; RV32IA-NEXT:    mv a5, a1
-; RV32IA-NEXT:    xor a5, a3, a5
-; RV32IA-NEXT:    and a5, a5, a4
-; RV32IA-NEXT:    xor a5, a3, a5
-; RV32IA-NEXT:    sc.w a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB55_1
+; RV32IA-NEXT:    lui a2, 16
+; RV32IA-NEXT:    addi a2, a2, -1
+; RV32IA-NEXT:    sll a2, a2, a0
+; RV32IA-NEXT:  .LBB65_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:    lr.w a3, (a1)
+; RV32IA-NEXT:    li a4, 0
+; RV32IA-NEXT:    xor a4, a3, a4
+; RV32IA-NEXT:    and a4, a4, a2
+; RV32IA-NEXT:    xor a4, a3, a4
+; RV32IA-NEXT:    sc.w a4, a4, (a1)
+; RV32IA-NEXT:    bnez a4, .LBB65_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
 ;
-; RV64I-LABEL: atomicrmw_xchg_i16_monotonic:
+; RV64I-LABEL: atomicrmw_xchg_0_i16_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_exchange_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
-; RV64IA-LABEL: atomicrmw_xchg_i16_monotonic:
+; RV64IA-LABEL: atomicrmw_xchg_0_i16_monotonic:
 ; RV64IA:       # %bb.0:
-; RV64IA-NEXT:    andi a2, a0, -4
+; RV64IA-NEXT:    andi a1, a0, -4
 ; RV64IA-NEXT:    slli a0, a0, 3
-; RV64IA-NEXT:    lui a3, 16
-; RV64IA-NEXT:    addiw a3, a3, -1
-; RV64IA-NEXT:    sllw a4, a3, a0
-; RV64IA-NEXT:    and a1, a1, a3
-; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB55_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT:    lr.w a3, (a2)
-; RV64IA-NEXT:    mv a5, a1
-; RV64IA-NEXT:    xor a5, a3, a5
-; RV64IA-NEXT:    and a5, a5, a4
-; RV64IA-NEXT:    xor a5, a3, a5
-; RV64IA-NEXT:    sc.w a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB55_1
+; RV64IA-NEXT:    lui a2, 16
+; RV64IA-NEXT:    addiw a2, a2, -1
+; RV64IA-NEXT:    sllw a2, a2, a0
+; RV64IA-NEXT:  .LBB65_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:    lr.w a3, (a1)
+; RV64IA-NEXT:    li a4, 0
+; RV64IA-NEXT:    xor a4, a3, a4
+; RV64IA-NEXT:    and a4, a4, a2
+; RV64IA-NEXT:    xor a4, a3, a4
+; RV64IA-NEXT:    sc.w a4, a4, (a1)
+; RV64IA-NEXT:    bnez a4, .LBB65_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
-  %1 = atomicrmw xchg ptr %a, i16 %b monotonic
+  %1 = atomicrmw xchg ptr %a, i16 0 monotonic
   ret i16 %1
 }
 
-define i16 @atomicrmw_xchg_i16_acquire(ptr %a, i16 %b) nounwind {
-; RV32I-LABEL: atomicrmw_xchg_i16_acquire:
+define i16 @atomicrmw_xchg_0_i16_acquire(ptr %a) nounwind {
+; RV32I-LABEL: atomicrmw_xchg_0_i16_acquire:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 2
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __atomic_exchange_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
-; RV32IA-LABEL: atomicrmw_xchg_i16_acquire:
+; RV32IA-LABEL: atomicrmw_xchg_0_i16_acquire:
 ; RV32IA:       # %bb.0:
-; RV32IA-NEXT:    andi a2, a0, -4
+; RV32IA-NEXT:    andi a1, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    lui a3, 16
-; RV32IA-NEXT:    addi a3, a3, -1
-; RV32IA-NEXT:    sll a4, a3, a0
-; RV32IA-NEXT:    and a1, a1, a3
-; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB56_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    lr.w.aq a3, (a2)
-; RV32IA-NEXT:    mv a5, a1
-; RV32IA-NEXT:    xor a5, a3, a5
-; RV32IA-NEXT:    and a5, a5, a4
-; RV32IA-NEXT:    xor a5, a3, a5
-; RV32IA-NEXT:    sc.w a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB56_1
+; RV32IA-NEXT:    lui a2, 16
+; RV32IA-NEXT:    addi a2, a2, -1
+; RV32IA-NEXT:    sll a2, a2, a0
+; RV32IA-NEXT:  .LBB66_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:    lr.w.aq a3, (a1)
+; RV32IA-NEXT:    li a4, 0
+; RV32IA-NEXT:    xor a4, a3, a4
+; RV32IA-NEXT:    and a4, a4, a2
+; RV32IA-NEXT:    xor a4, a3, a4
+; RV32IA-NEXT:    sc.w a4, a4, (a1)
+; RV32IA-NEXT:    bnez a4, .LBB66_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
 ;
-; RV64I-LABEL: atomicrmw_xchg_i16_acquire:
+; RV64I-LABEL: atomicrmw_xchg_0_i16_acquire:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 2
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __atomic_exchange_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
-; RV64IA-LABEL: atomicrmw_xchg_i16_acquire:
+; RV64IA-LABEL: atomicrmw_xchg_0_i16_acquire:
 ; RV64IA:       # %bb.0:
-; RV64IA-NEXT:    andi a2, a0, -4
+; RV64IA-NEXT:    andi a1, a0, -4
 ; RV64IA-NEXT:    slli a0, a0, 3
-; RV64IA-NEXT:    lui a3, 16
-; RV64IA-NEXT:    addiw a3, a3, -1
-; RV64IA-NEXT:    sllw a4, a3, a0
-; RV64IA-NEXT:    and a1, a1, a3
-; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB56_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT:    lr.w.aq a3, (a2)
-; RV64IA-NEXT:    mv a5, a1
-; RV64IA-NEXT:    xor a5, a3, a5
-; RV64IA-NEXT:    and a5, a5, a4
-; RV64IA-NEXT:    xor a5, a3, a5
-; RV64IA-NEXT:    sc.w a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB56_1
+; RV64IA-NEXT:    lui a2, 16
+; RV64IA-NEXT:    addiw a2, a2, -1
+; RV64IA-NEXT:    sllw a2, a2, a0
+; RV64IA-NEXT:  .LBB66_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:    lr.w.aq a3, (a1)
+; RV64IA-NEXT:    li a4, 0
+; RV64IA-NEXT:    xor a4, a3, a4
+; RV64IA-NEXT:    and a4, a4, a2
+; RV64IA-NEXT:    xor a4, a3, a4
+; RV64IA-NEXT:    sc.w a4, a4, (a1)
+; RV64IA-NEXT:    bnez a4, .LBB66_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
-  %1 = atomicrmw xchg ptr %a, i16 %b acquire
+  %1 = atomicrmw xchg ptr %a, i16 0 acquire
   ret i16 %1
 }
 
-define i16 @atomicrmw_xchg_i16_release(ptr %a, i16 %b) nounwind {
-; RV32I-LABEL: atomicrmw_xchg_i16_release:
+define i16 @atomicrmw_xchg_0_i16_release(ptr %a) nounwind {
+; RV32I-LABEL: atomicrmw_xchg_0_i16_release:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 3
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __atomic_exchange_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
-; RV32IA-LABEL: atomicrmw_xchg_i16_release:
+; RV32IA-LABEL: atomicrmw_xchg_0_i16_release:
 ; RV32IA:       # %bb.0:
-; RV32IA-NEXT:    andi a2, a0, -4
+; RV32IA-NEXT:    andi a1, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    lui a3, 16
-; RV32IA-NEXT:    addi a3, a3, -1
-; RV32IA-NEXT:    sll a4, a3, a0
-; RV32IA-NEXT:    and a1, a1, a3
-; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB57_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    lr.w a3, (a2)
-; RV32IA-NEXT:    mv a5, a1
-; RV32IA-NEXT:    xor a5, a3, a5
-; RV32IA-NEXT:    and a5, a5, a4
-; RV32IA-NEXT:    xor a5, a3, a5
-; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB57_1
+; RV32IA-NEXT:    lui a2, 16
+; RV32IA-NEXT:    addi a2, a2, -1
+; RV32IA-NEXT:    sll a2, a2, a0
+; RV32IA-NEXT:  .LBB67_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:    lr.w a3, (a1)
+; RV32IA-NEXT:    li a4, 0
+; RV32IA-NEXT:    xor a4, a3, a4
+; RV32IA-NEXT:    and a4, a4, a2
+; RV32IA-NEXT:    xor a4, a3, a4
+; RV32IA-NEXT:    sc.w.rl a4, a4, (a1)
+; RV32IA-NEXT:    bnez a4, .LBB67_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
 ;
-; RV64I-LABEL: atomicrmw_xchg_i16_release:
+; RV64I-LABEL: atomicrmw_xchg_0_i16_release:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 3
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __atomic_exchange_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
-; RV64IA-LABEL: atomicrmw_xchg_i16_release:
+; RV64IA-LABEL: atomicrmw_xchg_0_i16_release:
 ; RV64IA:       # %bb.0:
-; RV64IA-NEXT:    andi a2, a0, -4
+; RV64IA-NEXT:    andi a1, a0, -4
 ; RV64IA-NEXT:    slli a0, a0, 3
-; RV64IA-NEXT:    lui a3, 16
-; RV64IA-NEXT:    addiw a3, a3, -1
-; RV64IA-NEXT:    sllw a4, a3, a0
-; RV64IA-NEXT:    and a1, a1, a3
-; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB57_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT:    lr.w a3, (a2)
-; RV64IA-NEXT:    mv a5, a1
-; RV64IA-NEXT:    xor a5, a3, a5
-; RV64IA-NEXT:    and a5, a5, a4
-; RV64IA-NEXT:    xor a5, a3, a5
-; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB57_1
+; RV64IA-NEXT:    lui a2, 16
+; RV64IA-NEXT:    addiw a2, a2, -1
+; RV64IA-NEXT:    sllw a2, a2, a0
+; RV64IA-NEXT:  .LBB67_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:    lr.w a3, (a1)
+; RV64IA-NEXT:    li a4, 0
+; RV64IA-NEXT:    xor a4, a3, a4
+; RV64IA-NEXT:    and a4, a4, a2
+; RV64IA-NEXT:    xor a4, a3, a4
+; RV64IA-NEXT:    sc.w.rl a4, a4, (a1)
+; RV64IA-NEXT:    bnez a4, .LBB67_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
-  %1 = atomicrmw xchg ptr %a, i16 %b release
+  %1 = atomicrmw xchg ptr %a, i16 0 release
   ret i16 %1
 }
 
-define i16 @atomicrmw_xchg_i16_acq_rel(ptr %a, i16 %b) nounwind {
-; RV32I-LABEL: atomicrmw_xchg_i16_acq_rel:
+define i16 @atomicrmw_xchg_0_i16_acq_rel(ptr %a) nounwind {
+; RV32I-LABEL: atomicrmw_xchg_0_i16_acq_rel:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 4
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __atomic_exchange_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
-; RV32IA-LABEL: atomicrmw_xchg_i16_acq_rel:
+; RV32IA-LABEL: atomicrmw_xchg_0_i16_acq_rel:
 ; RV32IA:       # %bb.0:
-; RV32IA-NEXT:    andi a2, a0, -4
+; RV32IA-NEXT:    andi a1, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    lui a3, 16
-; RV32IA-NEXT:    addi a3, a3, -1
-; RV32IA-NEXT:    sll a4, a3, a0
-; RV32IA-NEXT:    and a1, a1, a3
-; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB58_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    lr.w.aq a3, (a2)
-; RV32IA-NEXT:    mv a5, a1
-; RV32IA-NEXT:    xor a5, a3, a5
-; RV32IA-NEXT:    and a5, a5, a4
-; RV32IA-NEXT:    xor a5, a3, a5
-; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB58_1
+; RV32IA-NEXT:    lui a2, 16
+; RV32IA-NEXT:    addi a2, a2, -1
+; RV32IA-NEXT:    sll a2, a2, a0
+; RV32IA-NEXT:  .LBB68_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:    lr.w.aq a3, (a1)
+; RV32IA-NEXT:    li a4, 0
+; RV32IA-NEXT:    xor a4, a3, a4
+; RV32IA-NEXT:    and a4, a4, a2
+; RV32IA-NEXT:    xor a4, a3, a4
+; RV32IA-NEXT:    sc.w.rl a4, a4, (a1)
+; RV32IA-NEXT:    bnez a4, .LBB68_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
 ;
-; RV64I-LABEL: atomicrmw_xchg_i16_acq_rel:
+; RV64I-LABEL: atomicrmw_xchg_0_i16_acq_rel:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 4
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __atomic_exchange_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
-; RV64IA-LABEL: atomicrmw_xchg_i16_acq_rel:
+; RV64IA-LABEL: atomicrmw_xchg_0_i16_acq_rel:
 ; RV64IA:       # %bb.0:
-; RV64IA-NEXT:    andi a2, a0, -4
+; RV64IA-NEXT:    andi a1, a0, -4
 ; RV64IA-NEXT:    slli a0, a0, 3
-; RV64IA-NEXT:    lui a3, 16
-; RV64IA-NEXT:    addiw a3, a3, -1
-; RV64IA-NEXT:    sllw a4, a3, a0
-; RV64IA-NEXT:    and a1, a1, a3
-; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB58_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT:    lr.w.aq a3, (a2)
-; RV64IA-NEXT:    mv a5, a1
-; RV64IA-NEXT:    xor a5, a3, a5
-; RV64IA-NEXT:    and a5, a5, a4
-; RV64IA-NEXT:    xor a5, a3, a5
-; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB58_1
+; RV64IA-NEXT:    lui a2, 16
+; RV64IA-NEXT:    addiw a2, a2, -1
+; RV64IA-NEXT:    sllw a2, a2, a0
+; RV64IA-NEXT:  .LBB68_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:    lr.w.aq a3, (a1)
+; RV64IA-NEXT:    li a4, 0
+; RV64IA-NEXT:    xor a4, a3, a4
+; RV64IA-NEXT:    and a4, a4, a2
+; RV64IA-NEXT:    xor a4, a3, a4
+; RV64IA-NEXT:    sc.w.rl a4, a4, (a1)
+; RV64IA-NEXT:    bnez a4, .LBB68_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
-  %1 = atomicrmw xchg ptr %a, i16 %b acq_rel
+  %1 = atomicrmw xchg ptr %a, i16 0 acq_rel
   ret i16 %1
 }
 
-define i16 @atomicrmw_xchg_i16_seq_cst(ptr %a, i16 %b) nounwind {
-; RV32I-LABEL: atomicrmw_xchg_i16_seq_cst:
+define i16 @atomicrmw_xchg_0_i16_seq_cst(ptr %a) nounwind {
+; RV32I-LABEL: atomicrmw_xchg_0_i16_seq_cst:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a2, 5
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __atomic_exchange_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
-; RV32IA-LABEL: atomicrmw_xchg_i16_seq_cst:
+; RV32IA-LABEL: atomicrmw_xchg_0_i16_seq_cst:
 ; RV32IA:       # %bb.0:
-; RV32IA-NEXT:    andi a2, a0, -4
+; RV32IA-NEXT:    andi a1, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    lui a3, 16
-; RV32IA-NEXT:    addi a3, a3, -1
-; RV32IA-NEXT:    sll a4, a3, a0
-; RV32IA-NEXT:    and a1, a1, a3
-; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB59_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    lr.w.aqrl a3, (a2)
-; RV32IA-NEXT:    mv a5, a1
-; RV32IA-NEXT:    xor a5, a3, a5
-; RV32IA-NEXT:    and a5, a5, a4
-; RV32IA-NEXT:    xor a5, a3, a5
-; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB59_1
+; RV32IA-NEXT:    lui a2, 16
+; RV32IA-NEXT:    addi a2, a2, -1
+; RV32IA-NEXT:    sll a2, a2, a0
+; RV32IA-NEXT:  .LBB69_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:    lr.w.aqrl a3, (a1)
+; RV32IA-NEXT:    li a4, 0
+; RV32IA-NEXT:    xor a4, a3, a4
+; RV32IA-NEXT:    and a4, a4, a2
+; RV32IA-NEXT:    xor a4, a3, a4
+; RV32IA-NEXT:    sc.w.rl a4, a4, (a1)
+; RV32IA-NEXT:    bnez a4, .LBB69_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
 ;
-; RV64I-LABEL: atomicrmw_xchg_i16_seq_cst:
+; RV64I-LABEL: atomicrmw_xchg_0_i16_seq_cst:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a2, 5
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __atomic_exchange_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
-; RV64IA-LABEL: atomicrmw_xchg_i16_seq_cst:
+; RV64IA-LABEL: atomicrmw_xchg_0_i16_seq_cst:
 ; RV64IA:       # %bb.0:
-; RV64IA-NEXT:    andi a2, a0, -4
+; RV64IA-NEXT:    andi a1, a0, -4
 ; RV64IA-NEXT:    slli a0, a0, 3
-; RV64IA-NEXT:    lui a3, 16
-; RV64IA-NEXT:    addiw a3, a3, -1
-; RV64IA-NEXT:    sllw a4, a3, a0
-; RV64IA-NEXT:    and a1, a1, a3
-; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB59_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT:    lr.w.aqrl a3, (a2)
-; RV64IA-NEXT:    mv a5, a1
-; RV64IA-NEXT:    xor a5, a3, a5
-; RV64IA-NEXT:    and a5, a5, a4
-; RV64IA-NEXT:    xor a5, a3, a5
-; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB59_1
+; RV64IA-NEXT:    lui a2, 16
+; RV64IA-NEXT:    addiw a2, a2, -1
+; RV64IA-NEXT:    sllw a2, a2, a0
+; RV64IA-NEXT:  .LBB69_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:    lr.w.aqrl a3, (a1)
+; RV64IA-NEXT:    li a4, 0
+; RV64IA-NEXT:    xor a4, a3, a4
+; RV64IA-NEXT:    and a4, a4, a2
+; RV64IA-NEXT:    xor a4, a3, a4
+; RV64IA-NEXT:    sc.w.rl a4, a4, (a1)
+; RV64IA-NEXT:    bnez a4, .LBB69_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
-  %1 = atomicrmw xchg ptr %a, i16 %b seq_cst
+  %1 = atomicrmw xchg ptr %a, i16 0 seq_cst
   ret i16 %1
 }
 
@@ -5132,14 +5768,14 @@ define i16 @atomicrmw_add_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a4, a3, a0
 ; RV32IA-NEXT:    and a1, a1, a3
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB60_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB70_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a3, (a2)
 ; RV32IA-NEXT:    add a5, a3, a1
 ; RV32IA-NEXT:    xor a5, a3, a5
 ; RV32IA-NEXT:    and a5, a5, a4
 ; RV32IA-NEXT:    xor a5, a3, a5
 ; RV32IA-NEXT:    sc.w a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB60_1
+; RV32IA-NEXT:    bnez a5, .LBB70_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -5163,14 +5799,14 @@ define i16 @atomicrmw_add_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a4, a3, a0
 ; RV64IA-NEXT:    and a1, a1, a3
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB60_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB70_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a3, (a2)
 ; RV64IA-NEXT:    add a5, a3, a1
 ; RV64IA-NEXT:    xor a5, a3, a5
 ; RV64IA-NEXT:    and a5, a5, a4
 ; RV64IA-NEXT:    xor a5, a3, a5
 ; RV64IA-NEXT:    sc.w a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB60_1
+; RV64IA-NEXT:    bnez a5, .LBB70_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -5198,14 +5834,14 @@ define i16 @atomicrmw_add_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a4, a3, a0
 ; RV32IA-NEXT:    and a1, a1, a3
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB61_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB71_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a3, (a2)
 ; RV32IA-NEXT:    add a5, a3, a1
 ; RV32IA-NEXT:    xor a5, a3, a5
 ; RV32IA-NEXT:    and a5, a5, a4
 ; RV32IA-NEXT:    xor a5, a3, a5
 ; RV32IA-NEXT:    sc.w a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB61_1
+; RV32IA-NEXT:    bnez a5, .LBB71_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -5229,14 +5865,14 @@ define i16 @atomicrmw_add_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a4, a3, a0
 ; RV64IA-NEXT:    and a1, a1, a3
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB61_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB71_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a3, (a2)
 ; RV64IA-NEXT:    add a5, a3, a1
 ; RV64IA-NEXT:    xor a5, a3, a5
 ; RV64IA-NEXT:    and a5, a5, a4
 ; RV64IA-NEXT:    xor a5, a3, a5
 ; RV64IA-NEXT:    sc.w a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB61_1
+; RV64IA-NEXT:    bnez a5, .LBB71_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -5264,14 +5900,14 @@ define i16 @atomicrmw_add_i16_release(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a4, a3, a0
 ; RV32IA-NEXT:    and a1, a1, a3
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB62_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB72_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a3, (a2)
 ; RV32IA-NEXT:    add a5, a3, a1
 ; RV32IA-NEXT:    xor a5, a3, a5
 ; RV32IA-NEXT:    and a5, a5, a4
 ; RV32IA-NEXT:    xor a5, a3, a5
 ; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB62_1
+; RV32IA-NEXT:    bnez a5, .LBB72_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -5295,14 +5931,14 @@ define i16 @atomicrmw_add_i16_release(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a4, a3, a0
 ; RV64IA-NEXT:    and a1, a1, a3
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB62_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB72_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a3, (a2)
 ; RV64IA-NEXT:    add a5, a3, a1
 ; RV64IA-NEXT:    xor a5, a3, a5
 ; RV64IA-NEXT:    and a5, a5, a4
 ; RV64IA-NEXT:    xor a5, a3, a5
 ; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB62_1
+; RV64IA-NEXT:    bnez a5, .LBB72_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -5330,14 +5966,14 @@ define i16 @atomicrmw_add_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a4, a3, a0
 ; RV32IA-NEXT:    and a1, a1, a3
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB63_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB73_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a3, (a2)
 ; RV32IA-NEXT:    add a5, a3, a1
 ; RV32IA-NEXT:    xor a5, a3, a5
 ; RV32IA-NEXT:    and a5, a5, a4
 ; RV32IA-NEXT:    xor a5, a3, a5
 ; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB63_1
+; RV32IA-NEXT:    bnez a5, .LBB73_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -5361,14 +5997,14 @@ define i16 @atomicrmw_add_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a4, a3, a0
 ; RV64IA-NEXT:    and a1, a1, a3
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB63_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB73_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a3, (a2)
 ; RV64IA-NEXT:    add a5, a3, a1
 ; RV64IA-NEXT:    xor a5, a3, a5
 ; RV64IA-NEXT:    and a5, a5, a4
 ; RV64IA-NEXT:    xor a5, a3, a5
 ; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB63_1
+; RV64IA-NEXT:    bnez a5, .LBB73_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -5396,14 +6032,14 @@ define i16 @atomicrmw_add_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a4, a3, a0
 ; RV32IA-NEXT:    and a1, a1, a3
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB64_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB74_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aqrl a3, (a2)
 ; RV32IA-NEXT:    add a5, a3, a1
 ; RV32IA-NEXT:    xor a5, a3, a5
 ; RV32IA-NEXT:    and a5, a5, a4
 ; RV32IA-NEXT:    xor a5, a3, a5
 ; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB64_1
+; RV32IA-NEXT:    bnez a5, .LBB74_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -5427,14 +6063,14 @@ define i16 @atomicrmw_add_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a4, a3, a0
 ; RV64IA-NEXT:    and a1, a1, a3
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB64_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB74_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aqrl a3, (a2)
 ; RV64IA-NEXT:    add a5, a3, a1
 ; RV64IA-NEXT:    xor a5, a3, a5
 ; RV64IA-NEXT:    and a5, a5, a4
 ; RV64IA-NEXT:    xor a5, a3, a5
 ; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB64_1
+; RV64IA-NEXT:    bnez a5, .LBB74_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -5462,14 +6098,14 @@ define i16 @atomicrmw_sub_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a4, a3, a0
 ; RV32IA-NEXT:    and a1, a1, a3
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB65_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB75_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a3, (a2)
 ; RV32IA-NEXT:    sub a5, a3, a1
 ; RV32IA-NEXT:    xor a5, a3, a5
 ; RV32IA-NEXT:    and a5, a5, a4
 ; RV32IA-NEXT:    xor a5, a3, a5
 ; RV32IA-NEXT:    sc.w a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB65_1
+; RV32IA-NEXT:    bnez a5, .LBB75_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -5493,14 +6129,14 @@ define i16 @atomicrmw_sub_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a4, a3, a0
 ; RV64IA-NEXT:    and a1, a1, a3
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB65_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB75_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a3, (a2)
 ; RV64IA-NEXT:    sub a5, a3, a1
 ; RV64IA-NEXT:    xor a5, a3, a5
 ; RV64IA-NEXT:    and a5, a5, a4
 ; RV64IA-NEXT:    xor a5, a3, a5
 ; RV64IA-NEXT:    sc.w a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB65_1
+; RV64IA-NEXT:    bnez a5, .LBB75_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -5528,14 +6164,14 @@ define i16 @atomicrmw_sub_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a4, a3, a0
 ; RV32IA-NEXT:    and a1, a1, a3
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB66_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB76_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a3, (a2)
 ; RV32IA-NEXT:    sub a5, a3, a1
 ; RV32IA-NEXT:    xor a5, a3, a5
 ; RV32IA-NEXT:    and a5, a5, a4
 ; RV32IA-NEXT:    xor a5, a3, a5
 ; RV32IA-NEXT:    sc.w a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB66_1
+; RV32IA-NEXT:    bnez a5, .LBB76_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -5559,14 +6195,14 @@ define i16 @atomicrmw_sub_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a4, a3, a0
 ; RV64IA-NEXT:    and a1, a1, a3
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB66_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB76_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a3, (a2)
 ; RV64IA-NEXT:    sub a5, a3, a1
 ; RV64IA-NEXT:    xor a5, a3, a5
 ; RV64IA-NEXT:    and a5, a5, a4
 ; RV64IA-NEXT:    xor a5, a3, a5
 ; RV64IA-NEXT:    sc.w a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB66_1
+; RV64IA-NEXT:    bnez a5, .LBB76_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -5594,14 +6230,14 @@ define i16 @atomicrmw_sub_i16_release(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a4, a3, a0
 ; RV32IA-NEXT:    and a1, a1, a3
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB67_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB77_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a3, (a2)
 ; RV32IA-NEXT:    sub a5, a3, a1
 ; RV32IA-NEXT:    xor a5, a3, a5
 ; RV32IA-NEXT:    and a5, a5, a4
 ; RV32IA-NEXT:    xor a5, a3, a5
 ; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB67_1
+; RV32IA-NEXT:    bnez a5, .LBB77_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -5625,14 +6261,14 @@ define i16 @atomicrmw_sub_i16_release(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a4, a3, a0
 ; RV64IA-NEXT:    and a1, a1, a3
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB67_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB77_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a3, (a2)
 ; RV64IA-NEXT:    sub a5, a3, a1
 ; RV64IA-NEXT:    xor a5, a3, a5
 ; RV64IA-NEXT:    and a5, a5, a4
 ; RV64IA-NEXT:    xor a5, a3, a5
 ; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB67_1
+; RV64IA-NEXT:    bnez a5, .LBB77_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -5660,14 +6296,14 @@ define i16 @atomicrmw_sub_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a4, a3, a0
 ; RV32IA-NEXT:    and a1, a1, a3
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB68_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB78_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a3, (a2)
 ; RV32IA-NEXT:    sub a5, a3, a1
 ; RV32IA-NEXT:    xor a5, a3, a5
 ; RV32IA-NEXT:    and a5, a5, a4
 ; RV32IA-NEXT:    xor a5, a3, a5
 ; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB68_1
+; RV32IA-NEXT:    bnez a5, .LBB78_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -5691,14 +6327,14 @@ define i16 @atomicrmw_sub_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a4, a3, a0
 ; RV64IA-NEXT:    and a1, a1, a3
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB68_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB78_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a3, (a2)
 ; RV64IA-NEXT:    sub a5, a3, a1
 ; RV64IA-NEXT:    xor a5, a3, a5
 ; RV64IA-NEXT:    and a5, a5, a4
 ; RV64IA-NEXT:    xor a5, a3, a5
 ; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB68_1
+; RV64IA-NEXT:    bnez a5, .LBB78_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -5726,14 +6362,14 @@ define i16 @atomicrmw_sub_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a4, a3, a0
 ; RV32IA-NEXT:    and a1, a1, a3
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB69_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB79_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aqrl a3, (a2)
 ; RV32IA-NEXT:    sub a5, a3, a1
 ; RV32IA-NEXT:    xor a5, a3, a5
 ; RV32IA-NEXT:    and a5, a5, a4
 ; RV32IA-NEXT:    xor a5, a3, a5
 ; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB69_1
+; RV32IA-NEXT:    bnez a5, .LBB79_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -5757,14 +6393,14 @@ define i16 @atomicrmw_sub_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a4, a3, a0
 ; RV64IA-NEXT:    and a1, a1, a3
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB69_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB79_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aqrl a3, (a2)
 ; RV64IA-NEXT:    sub a5, a3, a1
 ; RV64IA-NEXT:    xor a5, a3, a5
 ; RV64IA-NEXT:    and a5, a5, a4
 ; RV64IA-NEXT:    xor a5, a3, a5
 ; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB69_1
+; RV64IA-NEXT:    bnez a5, .LBB79_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -6062,7 +6698,7 @@ define i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a4, a3, a0
 ; RV32IA-NEXT:    and a1, a1, a3
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB75_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB85_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a3, (a2)
 ; RV32IA-NEXT:    and a5, a3, a1
 ; RV32IA-NEXT:    not a5, a5
@@ -6070,7 +6706,7 @@ define i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    and a5, a5, a4
 ; RV32IA-NEXT:    xor a5, a3, a5
 ; RV32IA-NEXT:    sc.w a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB75_1
+; RV32IA-NEXT:    bnez a5, .LBB85_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -6094,7 +6730,7 @@ define i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a4, a3, a0
 ; RV64IA-NEXT:    and a1, a1, a3
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB75_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB85_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a3, (a2)
 ; RV64IA-NEXT:    and a5, a3, a1
 ; RV64IA-NEXT:    not a5, a5
@@ -6102,7 +6738,7 @@ define i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    and a5, a5, a4
 ; RV64IA-NEXT:    xor a5, a3, a5
 ; RV64IA-NEXT:    sc.w a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB75_1
+; RV64IA-NEXT:    bnez a5, .LBB85_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -6130,7 +6766,7 @@ define i16 @atomicrmw_nand_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a4, a3, a0
 ; RV32IA-NEXT:    and a1, a1, a3
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB76_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB86_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a3, (a2)
 ; RV32IA-NEXT:    and a5, a3, a1
 ; RV32IA-NEXT:    not a5, a5
@@ -6138,7 +6774,7 @@ define i16 @atomicrmw_nand_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    and a5, a5, a4
 ; RV32IA-NEXT:    xor a5, a3, a5
 ; RV32IA-NEXT:    sc.w a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB76_1
+; RV32IA-NEXT:    bnez a5, .LBB86_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -6162,7 +6798,7 @@ define i16 @atomicrmw_nand_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a4, a3, a0
 ; RV64IA-NEXT:    and a1, a1, a3
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB76_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB86_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a3, (a2)
 ; RV64IA-NEXT:    and a5, a3, a1
 ; RV64IA-NEXT:    not a5, a5
@@ -6170,7 +6806,7 @@ define i16 @atomicrmw_nand_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    and a5, a5, a4
 ; RV64IA-NEXT:    xor a5, a3, a5
 ; RV64IA-NEXT:    sc.w a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB76_1
+; RV64IA-NEXT:    bnez a5, .LBB86_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -6198,7 +6834,7 @@ define i16 @atomicrmw_nand_i16_release(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a4, a3, a0
 ; RV32IA-NEXT:    and a1, a1, a3
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB77_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB87_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a3, (a2)
 ; RV32IA-NEXT:    and a5, a3, a1
 ; RV32IA-NEXT:    not a5, a5
@@ -6206,7 +6842,7 @@ define i16 @atomicrmw_nand_i16_release(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    and a5, a5, a4
 ; RV32IA-NEXT:    xor a5, a3, a5
 ; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB77_1
+; RV32IA-NEXT:    bnez a5, .LBB87_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -6230,7 +6866,7 @@ define i16 @atomicrmw_nand_i16_release(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a4, a3, a0
 ; RV64IA-NEXT:    and a1, a1, a3
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB77_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB87_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a3, (a2)
 ; RV64IA-NEXT:    and a5, a3, a1
 ; RV64IA-NEXT:    not a5, a5
@@ -6238,7 +6874,7 @@ define i16 @atomicrmw_nand_i16_release(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    and a5, a5, a4
 ; RV64IA-NEXT:    xor a5, a3, a5
 ; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB77_1
+; RV64IA-NEXT:    bnez a5, .LBB87_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -6266,7 +6902,7 @@ define i16 @atomicrmw_nand_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a4, a3, a0
 ; RV32IA-NEXT:    and a1, a1, a3
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB78_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB88_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a3, (a2)
 ; RV32IA-NEXT:    and a5, a3, a1
 ; RV32IA-NEXT:    not a5, a5
@@ -6274,7 +6910,7 @@ define i16 @atomicrmw_nand_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    and a5, a5, a4
 ; RV32IA-NEXT:    xor a5, a3, a5
 ; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB78_1
+; RV32IA-NEXT:    bnez a5, .LBB88_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -6298,7 +6934,7 @@ define i16 @atomicrmw_nand_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a4, a3, a0
 ; RV64IA-NEXT:    and a1, a1, a3
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB78_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB88_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a3, (a2)
 ; RV64IA-NEXT:    and a5, a3, a1
 ; RV64IA-NEXT:    not a5, a5
@@ -6306,7 +6942,7 @@ define i16 @atomicrmw_nand_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    and a5, a5, a4
 ; RV64IA-NEXT:    xor a5, a3, a5
 ; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB78_1
+; RV64IA-NEXT:    bnez a5, .LBB88_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -6334,7 +6970,7 @@ define i16 @atomicrmw_nand_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a4, a3, a0
 ; RV32IA-NEXT:    and a1, a1, a3
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB79_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB89_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aqrl a3, (a2)
 ; RV32IA-NEXT:    and a5, a3, a1
 ; RV32IA-NEXT:    not a5, a5
@@ -6342,7 +6978,7 @@ define i16 @atomicrmw_nand_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    and a5, a5, a4
 ; RV32IA-NEXT:    xor a5, a3, a5
 ; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB79_1
+; RV32IA-NEXT:    bnez a5, .LBB89_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -6366,7 +7002,7 @@ define i16 @atomicrmw_nand_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a4, a3, a0
 ; RV64IA-NEXT:    and a1, a1, a3
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB79_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB89_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aqrl a3, (a2)
 ; RV64IA-NEXT:    and a5, a3, a1
 ; RV64IA-NEXT:    not a5, a5
@@ -6374,7 +7010,7 @@ define i16 @atomicrmw_nand_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    and a5, a5, a4
 ; RV64IA-NEXT:    xor a5, a3, a5
 ; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB79_1
+; RV64IA-NEXT:    bnez a5, .LBB89_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -6855,9 +7491,9 @@ define i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    slli a0, a1, 16
 ; RV32I-NEXT:    srai s2, a0, 16
-; RV32I-NEXT:    j .LBB90_2
-; RV32I-NEXT:  .LBB90_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB90_2 Depth=1
+; RV32I-NEXT:    j .LBB100_2
+; RV32I-NEXT:  .LBB100_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB100_2 Depth=1
 ; RV32I-NEXT:    sh a3, 14(sp)
 ; RV32I-NEXT:    addi a1, sp, 14
 ; RV32I-NEXT:    mv a0, s0
@@ -6865,18 +7501,18 @@ define i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
-; RV32I-NEXT:    bnez a0, .LBB90_4
-; RV32I-NEXT:  .LBB90_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB100_4
+; RV32I-NEXT:  .LBB100_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    slli a0, a3, 16
 ; RV32I-NEXT:    srai a0, a0, 16
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    blt s2, a0, .LBB90_1
+; RV32I-NEXT:    blt s2, a0, .LBB100_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB90_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB100_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB90_1
-; RV32I-NEXT:  .LBB90_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB100_1
+; RV32I-NEXT:  .LBB100_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -6898,20 +7534,20 @@ define i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    li a5, 16
 ; RV32IA-NEXT:    sub a5, a5, a3
-; RV32IA-NEXT:  .LBB90_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB100_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a3, (a2)
 ; RV32IA-NEXT:    and a7, a3, a4
 ; RV32IA-NEXT:    mv a6, a3
 ; RV32IA-NEXT:    sll a7, a7, a5
 ; RV32IA-NEXT:    sra a7, a7, a5
-; RV32IA-NEXT:    bge a7, a1, .LBB90_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB90_1 Depth=1
+; RV32IA-NEXT:    bge a7, a1, .LBB100_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB100_1 Depth=1
 ; RV32IA-NEXT:    xor a6, a3, a1
 ; RV32IA-NEXT:    and a6, a6, a4
 ; RV32IA-NEXT:    xor a6, a3, a6
-; RV32IA-NEXT:  .LBB90_3: # in Loop: Header=BB90_1 Depth=1
+; RV32IA-NEXT:  .LBB100_3: # in Loop: Header=BB100_1 Depth=1
 ; RV32IA-NEXT:    sc.w a6, a6, (a2)
-; RV32IA-NEXT:    bnez a6, .LBB90_1
+; RV32IA-NEXT:    bnez a6, .LBB100_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -6928,9 +7564,9 @@ define i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    slli a0, a1, 48
 ; RV64I-NEXT:    srai s2, a0, 48
-; RV64I-NEXT:    j .LBB90_2
-; RV64I-NEXT:  .LBB90_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB90_2 Depth=1
+; RV64I-NEXT:    j .LBB100_2
+; RV64I-NEXT:  .LBB100_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB100_2 Depth=1
 ; RV64I-NEXT:    sh a3, 14(sp)
 ; RV64I-NEXT:    addi a1, sp, 14
 ; RV64I-NEXT:    mv a0, s0
@@ -6938,18 +7574,18 @@ define i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
-; RV64I-NEXT:    bnez a0, .LBB90_4
-; RV64I-NEXT:  .LBB90_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB100_4
+; RV64I-NEXT:  .LBB100_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    slli a0, a3, 48
 ; RV64I-NEXT:    srai a0, a0, 48
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    blt s2, a0, .LBB90_1
+; RV64I-NEXT:    blt s2, a0, .LBB100_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB90_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB100_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB90_1
-; RV64I-NEXT:  .LBB90_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB100_1
+; RV64I-NEXT:  .LBB100_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -6971,20 +7607,20 @@ define i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    li a5, 48
 ; RV64IA-NEXT:    sub a5, a5, a3
-; RV64IA-NEXT:  .LBB90_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB100_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a3, (a2)
 ; RV64IA-NEXT:    and a7, a3, a4
 ; RV64IA-NEXT:    mv a6, a3
 ; RV64IA-NEXT:    sll a7, a7, a5
 ; RV64IA-NEXT:    sra a7, a7, a5
-; RV64IA-NEXT:    bge a7, a1, .LBB90_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB90_1 Depth=1
+; RV64IA-NEXT:    bge a7, a1, .LBB100_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB100_1 Depth=1
 ; RV64IA-NEXT:    xor a6, a3, a1
 ; RV64IA-NEXT:    and a6, a6, a4
 ; RV64IA-NEXT:    xor a6, a3, a6
-; RV64IA-NEXT:  .LBB90_3: # in Loop: Header=BB90_1 Depth=1
+; RV64IA-NEXT:  .LBB100_3: # in Loop: Header=BB100_1 Depth=1
 ; RV64IA-NEXT:    sc.w a6, a6, (a2)
-; RV64IA-NEXT:    bnez a6, .LBB90_1
+; RV64IA-NEXT:    bnez a6, .LBB100_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -7005,9 +7641,9 @@ define i16 @atomicrmw_max_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    slli a0, a1, 16
 ; RV32I-NEXT:    srai s2, a0, 16
-; RV32I-NEXT:    j .LBB91_2
-; RV32I-NEXT:  .LBB91_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB91_2 Depth=1
+; RV32I-NEXT:    j .LBB101_2
+; RV32I-NEXT:  .LBB101_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB101_2 Depth=1
 ; RV32I-NEXT:    sh a3, 14(sp)
 ; RV32I-NEXT:    addi a1, sp, 14
 ; RV32I-NEXT:    li a3, 2
@@ -7015,18 +7651,18 @@ define i16 @atomicrmw_max_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
-; RV32I-NEXT:    bnez a0, .LBB91_4
-; RV32I-NEXT:  .LBB91_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB101_4
+; RV32I-NEXT:  .LBB101_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    slli a0, a3, 16
 ; RV32I-NEXT:    srai a0, a0, 16
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    blt s2, a0, .LBB91_1
+; RV32I-NEXT:    blt s2, a0, .LBB101_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB91_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB101_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB91_1
-; RV32I-NEXT:  .LBB91_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB101_1
+; RV32I-NEXT:  .LBB101_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -7048,20 +7684,20 @@ define i16 @atomicrmw_max_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    li a5, 16
 ; RV32IA-NEXT:    sub a5, a5, a3
-; RV32IA-NEXT:  .LBB91_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB101_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a3, (a2)
 ; RV32IA-NEXT:    and a7, a3, a4
 ; RV32IA-NEXT:    mv a6, a3
 ; RV32IA-NEXT:    sll a7, a7, a5
 ; RV32IA-NEXT:    sra a7, a7, a5
-; RV32IA-NEXT:    bge a7, a1, .LBB91_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB91_1 Depth=1
+; RV32IA-NEXT:    bge a7, a1, .LBB101_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB101_1 Depth=1
 ; RV32IA-NEXT:    xor a6, a3, a1
 ; RV32IA-NEXT:    and a6, a6, a4
 ; RV32IA-NEXT:    xor a6, a3, a6
-; RV32IA-NEXT:  .LBB91_3: # in Loop: Header=BB91_1 Depth=1
+; RV32IA-NEXT:  .LBB101_3: # in Loop: Header=BB101_1 Depth=1
 ; RV32IA-NEXT:    sc.w a6, a6, (a2)
-; RV32IA-NEXT:    bnez a6, .LBB91_1
+; RV32IA-NEXT:    bnez a6, .LBB101_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -7078,9 +7714,9 @@ define i16 @atomicrmw_max_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    slli a0, a1, 48
 ; RV64I-NEXT:    srai s2, a0, 48
-; RV64I-NEXT:    j .LBB91_2
-; RV64I-NEXT:  .LBB91_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB91_2 Depth=1
+; RV64I-NEXT:    j .LBB101_2
+; RV64I-NEXT:  .LBB101_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB101_2 Depth=1
 ; RV64I-NEXT:    sh a3, 14(sp)
 ; RV64I-NEXT:    addi a1, sp, 14
 ; RV64I-NEXT:    li a3, 2
@@ -7088,18 +7724,18 @@ define i16 @atomicrmw_max_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
-; RV64I-NEXT:    bnez a0, .LBB91_4
-; RV64I-NEXT:  .LBB91_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB101_4
+; RV64I-NEXT:  .LBB101_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    slli a0, a3, 48
 ; RV64I-NEXT:    srai a0, a0, 48
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    blt s2, a0, .LBB91_1
+; RV64I-NEXT:    blt s2, a0, .LBB101_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB91_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB101_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB91_1
-; RV64I-NEXT:  .LBB91_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB101_1
+; RV64I-NEXT:  .LBB101_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -7121,20 +7757,20 @@ define i16 @atomicrmw_max_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    li a5, 48
 ; RV64IA-NEXT:    sub a5, a5, a3
-; RV64IA-NEXT:  .LBB91_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB101_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a3, (a2)
 ; RV64IA-NEXT:    and a7, a3, a4
 ; RV64IA-NEXT:    mv a6, a3
 ; RV64IA-NEXT:    sll a7, a7, a5
 ; RV64IA-NEXT:    sra a7, a7, a5
-; RV64IA-NEXT:    bge a7, a1, .LBB91_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB91_1 Depth=1
+; RV64IA-NEXT:    bge a7, a1, .LBB101_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB101_1 Depth=1
 ; RV64IA-NEXT:    xor a6, a3, a1
 ; RV64IA-NEXT:    and a6, a6, a4
 ; RV64IA-NEXT:    xor a6, a3, a6
-; RV64IA-NEXT:  .LBB91_3: # in Loop: Header=BB91_1 Depth=1
+; RV64IA-NEXT:  .LBB101_3: # in Loop: Header=BB101_1 Depth=1
 ; RV64IA-NEXT:    sc.w a6, a6, (a2)
-; RV64IA-NEXT:    bnez a6, .LBB91_1
+; RV64IA-NEXT:    bnez a6, .LBB101_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -7155,9 +7791,9 @@ define i16 @atomicrmw_max_i16_release(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    slli a0, a1, 16
 ; RV32I-NEXT:    srai s2, a0, 16
-; RV32I-NEXT:    j .LBB92_2
-; RV32I-NEXT:  .LBB92_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB92_2 Depth=1
+; RV32I-NEXT:    j .LBB102_2
+; RV32I-NEXT:  .LBB102_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB102_2 Depth=1
 ; RV32I-NEXT:    sh a3, 14(sp)
 ; RV32I-NEXT:    addi a1, sp, 14
 ; RV32I-NEXT:    li a3, 3
@@ -7165,18 +7801,18 @@ define i16 @atomicrmw_max_i16_release(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
-; RV32I-NEXT:    bnez a0, .LBB92_4
-; RV32I-NEXT:  .LBB92_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB102_4
+; RV32I-NEXT:  .LBB102_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    slli a0, a3, 16
 ; RV32I-NEXT:    srai a0, a0, 16
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    blt s2, a0, .LBB92_1
+; RV32I-NEXT:    blt s2, a0, .LBB102_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB92_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB102_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB92_1
-; RV32I-NEXT:  .LBB92_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB102_1
+; RV32I-NEXT:  .LBB102_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -7198,20 +7834,20 @@ define i16 @atomicrmw_max_i16_release(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    li a5, 16
 ; RV32IA-NEXT:    sub a5, a5, a3
-; RV32IA-NEXT:  .LBB92_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB102_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a3, (a2)
 ; RV32IA-NEXT:    and a7, a3, a4
 ; RV32IA-NEXT:    mv a6, a3
 ; RV32IA-NEXT:    sll a7, a7, a5
 ; RV32IA-NEXT:    sra a7, a7, a5
-; RV32IA-NEXT:    bge a7, a1, .LBB92_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB92_1 Depth=1
+; RV32IA-NEXT:    bge a7, a1, .LBB102_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB102_1 Depth=1
 ; RV32IA-NEXT:    xor a6, a3, a1
 ; RV32IA-NEXT:    and a6, a6, a4
 ; RV32IA-NEXT:    xor a6, a3, a6
-; RV32IA-NEXT:  .LBB92_3: # in Loop: Header=BB92_1 Depth=1
+; RV32IA-NEXT:  .LBB102_3: # in Loop: Header=BB102_1 Depth=1
 ; RV32IA-NEXT:    sc.w.rl a6, a6, (a2)
-; RV32IA-NEXT:    bnez a6, .LBB92_1
+; RV32IA-NEXT:    bnez a6, .LBB102_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -7228,9 +7864,9 @@ define i16 @atomicrmw_max_i16_release(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    slli a0, a1, 48
 ; RV64I-NEXT:    srai s2, a0, 48
-; RV64I-NEXT:    j .LBB92_2
-; RV64I-NEXT:  .LBB92_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB92_2 Depth=1
+; RV64I-NEXT:    j .LBB102_2
+; RV64I-NEXT:  .LBB102_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB102_2 Depth=1
 ; RV64I-NEXT:    sh a3, 14(sp)
 ; RV64I-NEXT:    addi a1, sp, 14
 ; RV64I-NEXT:    li a3, 3
@@ -7238,18 +7874,18 @@ define i16 @atomicrmw_max_i16_release(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
-; RV64I-NEXT:    bnez a0, .LBB92_4
-; RV64I-NEXT:  .LBB92_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB102_4
+; RV64I-NEXT:  .LBB102_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    slli a0, a3, 48
 ; RV64I-NEXT:    srai a0, a0, 48
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    blt s2, a0, .LBB92_1
+; RV64I-NEXT:    blt s2, a0, .LBB102_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB92_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB102_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB92_1
-; RV64I-NEXT:  .LBB92_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB102_1
+; RV64I-NEXT:  .LBB102_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -7271,20 +7907,20 @@ define i16 @atomicrmw_max_i16_release(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    li a5, 48
 ; RV64IA-NEXT:    sub a5, a5, a3
-; RV64IA-NEXT:  .LBB92_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB102_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a3, (a2)
 ; RV64IA-NEXT:    and a7, a3, a4
 ; RV64IA-NEXT:    mv a6, a3
 ; RV64IA-NEXT:    sll a7, a7, a5
 ; RV64IA-NEXT:    sra a7, a7, a5
-; RV64IA-NEXT:    bge a7, a1, .LBB92_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB92_1 Depth=1
+; RV64IA-NEXT:    bge a7, a1, .LBB102_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB102_1 Depth=1
 ; RV64IA-NEXT:    xor a6, a3, a1
 ; RV64IA-NEXT:    and a6, a6, a4
 ; RV64IA-NEXT:    xor a6, a3, a6
-; RV64IA-NEXT:  .LBB92_3: # in Loop: Header=BB92_1 Depth=1
+; RV64IA-NEXT:  .LBB102_3: # in Loop: Header=BB102_1 Depth=1
 ; RV64IA-NEXT:    sc.w.rl a6, a6, (a2)
-; RV64IA-NEXT:    bnez a6, .LBB92_1
+; RV64IA-NEXT:    bnez a6, .LBB102_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -7305,9 +7941,9 @@ define i16 @atomicrmw_max_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    slli a0, a1, 16
 ; RV32I-NEXT:    srai s2, a0, 16
-; RV32I-NEXT:    j .LBB93_2
-; RV32I-NEXT:  .LBB93_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB93_2 Depth=1
+; RV32I-NEXT:    j .LBB103_2
+; RV32I-NEXT:  .LBB103_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB103_2 Depth=1
 ; RV32I-NEXT:    sh a3, 14(sp)
 ; RV32I-NEXT:    addi a1, sp, 14
 ; RV32I-NEXT:    li a3, 4
@@ -7315,18 +7951,18 @@ define i16 @atomicrmw_max_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
-; RV32I-NEXT:    bnez a0, .LBB93_4
-; RV32I-NEXT:  .LBB93_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB103_4
+; RV32I-NEXT:  .LBB103_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    slli a0, a3, 16
 ; RV32I-NEXT:    srai a0, a0, 16
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    blt s2, a0, .LBB93_1
+; RV32I-NEXT:    blt s2, a0, .LBB103_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB93_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB103_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB93_1
-; RV32I-NEXT:  .LBB93_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB103_1
+; RV32I-NEXT:  .LBB103_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -7348,20 +7984,20 @@ define i16 @atomicrmw_max_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    li a5, 16
 ; RV32IA-NEXT:    sub a5, a5, a3
-; RV32IA-NEXT:  .LBB93_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB103_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a3, (a2)
 ; RV32IA-NEXT:    and a7, a3, a4
 ; RV32IA-NEXT:    mv a6, a3
 ; RV32IA-NEXT:    sll a7, a7, a5
 ; RV32IA-NEXT:    sra a7, a7, a5
-; RV32IA-NEXT:    bge a7, a1, .LBB93_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB93_1 Depth=1
+; RV32IA-NEXT:    bge a7, a1, .LBB103_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB103_1 Depth=1
 ; RV32IA-NEXT:    xor a6, a3, a1
 ; RV32IA-NEXT:    and a6, a6, a4
 ; RV32IA-NEXT:    xor a6, a3, a6
-; RV32IA-NEXT:  .LBB93_3: # in Loop: Header=BB93_1 Depth=1
+; RV32IA-NEXT:  .LBB103_3: # in Loop: Header=BB103_1 Depth=1
 ; RV32IA-NEXT:    sc.w.rl a6, a6, (a2)
-; RV32IA-NEXT:    bnez a6, .LBB93_1
+; RV32IA-NEXT:    bnez a6, .LBB103_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -7378,9 +8014,9 @@ define i16 @atomicrmw_max_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    slli a0, a1, 48
 ; RV64I-NEXT:    srai s2, a0, 48
-; RV64I-NEXT:    j .LBB93_2
-; RV64I-NEXT:  .LBB93_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB93_2 Depth=1
+; RV64I-NEXT:    j .LBB103_2
+; RV64I-NEXT:  .LBB103_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB103_2 Depth=1
 ; RV64I-NEXT:    sh a3, 14(sp)
 ; RV64I-NEXT:    addi a1, sp, 14
 ; RV64I-NEXT:    li a3, 4
@@ -7388,18 +8024,18 @@ define i16 @atomicrmw_max_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
-; RV64I-NEXT:    bnez a0, .LBB93_4
-; RV64I-NEXT:  .LBB93_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB103_4
+; RV64I-NEXT:  .LBB103_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    slli a0, a3, 48
 ; RV64I-NEXT:    srai a0, a0, 48
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    blt s2, a0, .LBB93_1
+; RV64I-NEXT:    blt s2, a0, .LBB103_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB93_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB103_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB93_1
-; RV64I-NEXT:  .LBB93_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB103_1
+; RV64I-NEXT:  .LBB103_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -7421,20 +8057,20 @@ define i16 @atomicrmw_max_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    li a5, 48
 ; RV64IA-NEXT:    sub a5, a5, a3
-; RV64IA-NEXT:  .LBB93_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB103_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a3, (a2)
 ; RV64IA-NEXT:    and a7, a3, a4
 ; RV64IA-NEXT:    mv a6, a3
 ; RV64IA-NEXT:    sll a7, a7, a5
 ; RV64IA-NEXT:    sra a7, a7, a5
-; RV64IA-NEXT:    bge a7, a1, .LBB93_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB93_1 Depth=1
+; RV64IA-NEXT:    bge a7, a1, .LBB103_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB103_1 Depth=1
 ; RV64IA-NEXT:    xor a6, a3, a1
 ; RV64IA-NEXT:    and a6, a6, a4
 ; RV64IA-NEXT:    xor a6, a3, a6
-; RV64IA-NEXT:  .LBB93_3: # in Loop: Header=BB93_1 Depth=1
+; RV64IA-NEXT:  .LBB103_3: # in Loop: Header=BB103_1 Depth=1
 ; RV64IA-NEXT:    sc.w.rl a6, a6, (a2)
-; RV64IA-NEXT:    bnez a6, .LBB93_1
+; RV64IA-NEXT:    bnez a6, .LBB103_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -7455,9 +8091,9 @@ define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    slli a0, a1, 16
 ; RV32I-NEXT:    srai s2, a0, 16
-; RV32I-NEXT:    j .LBB94_2
-; RV32I-NEXT:  .LBB94_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB94_2 Depth=1
+; RV32I-NEXT:    j .LBB104_2
+; RV32I-NEXT:  .LBB104_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB104_2 Depth=1
 ; RV32I-NEXT:    sh a3, 14(sp)
 ; RV32I-NEXT:    addi a1, sp, 14
 ; RV32I-NEXT:    li a3, 5
@@ -7465,18 +8101,18 @@ define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
-; RV32I-NEXT:    bnez a0, .LBB94_4
-; RV32I-NEXT:  .LBB94_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB104_4
+; RV32I-NEXT:  .LBB104_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    slli a0, a3, 16
 ; RV32I-NEXT:    srai a0, a0, 16
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    blt s2, a0, .LBB94_1
+; RV32I-NEXT:    blt s2, a0, .LBB104_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB94_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB104_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB94_1
-; RV32I-NEXT:  .LBB94_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB104_1
+; RV32I-NEXT:  .LBB104_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -7498,20 +8134,20 @@ define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    li a5, 16
 ; RV32IA-NEXT:    sub a5, a5, a3
-; RV32IA-NEXT:  .LBB94_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB104_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aqrl a3, (a2)
 ; RV32IA-NEXT:    and a7, a3, a4
 ; RV32IA-NEXT:    mv a6, a3
 ; RV32IA-NEXT:    sll a7, a7, a5
 ; RV32IA-NEXT:    sra a7, a7, a5
-; RV32IA-NEXT:    bge a7, a1, .LBB94_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB94_1 Depth=1
+; RV32IA-NEXT:    bge a7, a1, .LBB104_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB104_1 Depth=1
 ; RV32IA-NEXT:    xor a6, a3, a1
 ; RV32IA-NEXT:    and a6, a6, a4
 ; RV32IA-NEXT:    xor a6, a3, a6
-; RV32IA-NEXT:  .LBB94_3: # in Loop: Header=BB94_1 Depth=1
+; RV32IA-NEXT:  .LBB104_3: # in Loop: Header=BB104_1 Depth=1
 ; RV32IA-NEXT:    sc.w.rl a6, a6, (a2)
-; RV32IA-NEXT:    bnez a6, .LBB94_1
+; RV32IA-NEXT:    bnez a6, .LBB104_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -7528,9 +8164,9 @@ define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    slli a0, a1, 48
 ; RV64I-NEXT:    srai s2, a0, 48
-; RV64I-NEXT:    j .LBB94_2
-; RV64I-NEXT:  .LBB94_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB94_2 Depth=1
+; RV64I-NEXT:    j .LBB104_2
+; RV64I-NEXT:  .LBB104_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB104_2 Depth=1
 ; RV64I-NEXT:    sh a3, 14(sp)
 ; RV64I-NEXT:    addi a1, sp, 14
 ; RV64I-NEXT:    li a3, 5
@@ -7538,18 +8174,18 @@ define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
-; RV64I-NEXT:    bnez a0, .LBB94_4
-; RV64I-NEXT:  .LBB94_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB104_4
+; RV64I-NEXT:  .LBB104_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    slli a0, a3, 48
 ; RV64I-NEXT:    srai a0, a0, 48
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    blt s2, a0, .LBB94_1
+; RV64I-NEXT:    blt s2, a0, .LBB104_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB94_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB104_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB94_1
-; RV64I-NEXT:  .LBB94_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB104_1
+; RV64I-NEXT:  .LBB104_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -7571,20 +8207,20 @@ define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    li a5, 48
 ; RV64IA-NEXT:    sub a5, a5, a3
-; RV64IA-NEXT:  .LBB94_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB104_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aqrl a3, (a2)
 ; RV64IA-NEXT:    and a7, a3, a4
 ; RV64IA-NEXT:    mv a6, a3
 ; RV64IA-NEXT:    sll a7, a7, a5
 ; RV64IA-NEXT:    sra a7, a7, a5
-; RV64IA-NEXT:    bge a7, a1, .LBB94_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB94_1 Depth=1
+; RV64IA-NEXT:    bge a7, a1, .LBB104_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB104_1 Depth=1
 ; RV64IA-NEXT:    xor a6, a3, a1
 ; RV64IA-NEXT:    and a6, a6, a4
 ; RV64IA-NEXT:    xor a6, a3, a6
-; RV64IA-NEXT:  .LBB94_3: # in Loop: Header=BB94_1 Depth=1
+; RV64IA-NEXT:  .LBB104_3: # in Loop: Header=BB104_1 Depth=1
 ; RV64IA-NEXT:    sc.w.rl a6, a6, (a2)
-; RV64IA-NEXT:    bnez a6, .LBB94_1
+; RV64IA-NEXT:    bnez a6, .LBB104_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -7605,9 +8241,9 @@ define i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    slli a0, a1, 16
 ; RV32I-NEXT:    srai s2, a0, 16
-; RV32I-NEXT:    j .LBB95_2
-; RV32I-NEXT:  .LBB95_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB95_2 Depth=1
+; RV32I-NEXT:    j .LBB105_2
+; RV32I-NEXT:  .LBB105_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB105_2 Depth=1
 ; RV32I-NEXT:    sh a3, 14(sp)
 ; RV32I-NEXT:    addi a1, sp, 14
 ; RV32I-NEXT:    mv a0, s0
@@ -7615,18 +8251,18 @@ define i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
-; RV32I-NEXT:    bnez a0, .LBB95_4
-; RV32I-NEXT:  .LBB95_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB105_4
+; RV32I-NEXT:  .LBB105_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    slli a0, a3, 16
 ; RV32I-NEXT:    srai a0, a0, 16
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bge s2, a0, .LBB95_1
+; RV32I-NEXT:    bge s2, a0, .LBB105_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB95_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB105_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB95_1
-; RV32I-NEXT:  .LBB95_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB105_1
+; RV32I-NEXT:  .LBB105_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -7648,20 +8284,20 @@ define i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    li a5, 16
 ; RV32IA-NEXT:    sub a5, a5, a3
-; RV32IA-NEXT:  .LBB95_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB105_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a3, (a2)
 ; RV32IA-NEXT:    and a7, a3, a4
 ; RV32IA-NEXT:    mv a6, a3
 ; RV32IA-NEXT:    sll a7, a7, a5
 ; RV32IA-NEXT:    sra a7, a7, a5
-; RV32IA-NEXT:    bge a1, a7, .LBB95_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB95_1 Depth=1
+; RV32IA-NEXT:    bge a1, a7, .LBB105_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB105_1 Depth=1
 ; RV32IA-NEXT:    xor a6, a3, a1
 ; RV32IA-NEXT:    and a6, a6, a4
 ; RV32IA-NEXT:    xor a6, a3, a6
-; RV32IA-NEXT:  .LBB95_3: # in Loop: Header=BB95_1 Depth=1
+; RV32IA-NEXT:  .LBB105_3: # in Loop: Header=BB105_1 Depth=1
 ; RV32IA-NEXT:    sc.w a6, a6, (a2)
-; RV32IA-NEXT:    bnez a6, .LBB95_1
+; RV32IA-NEXT:    bnez a6, .LBB105_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -7678,9 +8314,9 @@ define i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    slli a0, a1, 48
 ; RV64I-NEXT:    srai s2, a0, 48
-; RV64I-NEXT:    j .LBB95_2
-; RV64I-NEXT:  .LBB95_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB95_2 Depth=1
+; RV64I-NEXT:    j .LBB105_2
+; RV64I-NEXT:  .LBB105_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB105_2 Depth=1
 ; RV64I-NEXT:    sh a3, 14(sp)
 ; RV64I-NEXT:    addi a1, sp, 14
 ; RV64I-NEXT:    mv a0, s0
@@ -7688,18 +8324,18 @@ define i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
-; RV64I-NEXT:    bnez a0, .LBB95_4
-; RV64I-NEXT:  .LBB95_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB105_4
+; RV64I-NEXT:  .LBB105_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    slli a0, a3, 48
 ; RV64I-NEXT:    srai a0, a0, 48
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bge s2, a0, .LBB95_1
+; RV64I-NEXT:    bge s2, a0, .LBB105_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB95_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB105_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB95_1
-; RV64I-NEXT:  .LBB95_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB105_1
+; RV64I-NEXT:  .LBB105_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -7721,20 +8357,20 @@ define i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    li a5, 48
 ; RV64IA-NEXT:    sub a5, a5, a3
-; RV64IA-NEXT:  .LBB95_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB105_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a3, (a2)
 ; RV64IA-NEXT:    and a7, a3, a4
 ; RV64IA-NEXT:    mv a6, a3
 ; RV64IA-NEXT:    sll a7, a7, a5
 ; RV64IA-NEXT:    sra a7, a7, a5
-; RV64IA-NEXT:    bge a1, a7, .LBB95_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB95_1 Depth=1
+; RV64IA-NEXT:    bge a1, a7, .LBB105_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB105_1 Depth=1
 ; RV64IA-NEXT:    xor a6, a3, a1
 ; RV64IA-NEXT:    and a6, a6, a4
 ; RV64IA-NEXT:    xor a6, a3, a6
-; RV64IA-NEXT:  .LBB95_3: # in Loop: Header=BB95_1 Depth=1
+; RV64IA-NEXT:  .LBB105_3: # in Loop: Header=BB105_1 Depth=1
 ; RV64IA-NEXT:    sc.w a6, a6, (a2)
-; RV64IA-NEXT:    bnez a6, .LBB95_1
+; RV64IA-NEXT:    bnez a6, .LBB105_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -7755,9 +8391,9 @@ define i16 @atomicrmw_min_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    slli a0, a1, 16
 ; RV32I-NEXT:    srai s2, a0, 16
-; RV32I-NEXT:    j .LBB96_2
-; RV32I-NEXT:  .LBB96_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB96_2 Depth=1
+; RV32I-NEXT:    j .LBB106_2
+; RV32I-NEXT:  .LBB106_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB106_2 Depth=1
 ; RV32I-NEXT:    sh a3, 14(sp)
 ; RV32I-NEXT:    addi a1, sp, 14
 ; RV32I-NEXT:    li a3, 2
@@ -7765,18 +8401,18 @@ define i16 @atomicrmw_min_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
-; RV32I-NEXT:    bnez a0, .LBB96_4
-; RV32I-NEXT:  .LBB96_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB106_4
+; RV32I-NEXT:  .LBB106_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    slli a0, a3, 16
 ; RV32I-NEXT:    srai a0, a0, 16
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bge s2, a0, .LBB96_1
+; RV32I-NEXT:    bge s2, a0, .LBB106_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB96_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB106_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB96_1
-; RV32I-NEXT:  .LBB96_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB106_1
+; RV32I-NEXT:  .LBB106_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -7798,20 +8434,20 @@ define i16 @atomicrmw_min_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    li a5, 16
 ; RV32IA-NEXT:    sub a5, a5, a3
-; RV32IA-NEXT:  .LBB96_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB106_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a3, (a2)
 ; RV32IA-NEXT:    and a7, a3, a4
 ; RV32IA-NEXT:    mv a6, a3
 ; RV32IA-NEXT:    sll a7, a7, a5
 ; RV32IA-NEXT:    sra a7, a7, a5
-; RV32IA-NEXT:    bge a1, a7, .LBB96_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB96_1 Depth=1
+; RV32IA-NEXT:    bge a1, a7, .LBB106_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB106_1 Depth=1
 ; RV32IA-NEXT:    xor a6, a3, a1
 ; RV32IA-NEXT:    and a6, a6, a4
 ; RV32IA-NEXT:    xor a6, a3, a6
-; RV32IA-NEXT:  .LBB96_3: # in Loop: Header=BB96_1 Depth=1
+; RV32IA-NEXT:  .LBB106_3: # in Loop: Header=BB106_1 Depth=1
 ; RV32IA-NEXT:    sc.w a6, a6, (a2)
-; RV32IA-NEXT:    bnez a6, .LBB96_1
+; RV32IA-NEXT:    bnez a6, .LBB106_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -7828,9 +8464,9 @@ define i16 @atomicrmw_min_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    slli a0, a1, 48
 ; RV64I-NEXT:    srai s2, a0, 48
-; RV64I-NEXT:    j .LBB96_2
-; RV64I-NEXT:  .LBB96_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB96_2 Depth=1
+; RV64I-NEXT:    j .LBB106_2
+; RV64I-NEXT:  .LBB106_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB106_2 Depth=1
 ; RV64I-NEXT:    sh a3, 14(sp)
 ; RV64I-NEXT:    addi a1, sp, 14
 ; RV64I-NEXT:    li a3, 2
@@ -7838,18 +8474,18 @@ define i16 @atomicrmw_min_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
-; RV64I-NEXT:    bnez a0, .LBB96_4
-; RV64I-NEXT:  .LBB96_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB106_4
+; RV64I-NEXT:  .LBB106_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    slli a0, a3, 48
 ; RV64I-NEXT:    srai a0, a0, 48
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bge s2, a0, .LBB96_1
+; RV64I-NEXT:    bge s2, a0, .LBB106_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB96_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB106_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB96_1
-; RV64I-NEXT:  .LBB96_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB106_1
+; RV64I-NEXT:  .LBB106_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -7871,20 +8507,20 @@ define i16 @atomicrmw_min_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    li a5, 48
 ; RV64IA-NEXT:    sub a5, a5, a3
-; RV64IA-NEXT:  .LBB96_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB106_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a3, (a2)
 ; RV64IA-NEXT:    and a7, a3, a4
 ; RV64IA-NEXT:    mv a6, a3
 ; RV64IA-NEXT:    sll a7, a7, a5
 ; RV64IA-NEXT:    sra a7, a7, a5
-; RV64IA-NEXT:    bge a1, a7, .LBB96_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB96_1 Depth=1
+; RV64IA-NEXT:    bge a1, a7, .LBB106_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB106_1 Depth=1
 ; RV64IA-NEXT:    xor a6, a3, a1
 ; RV64IA-NEXT:    and a6, a6, a4
 ; RV64IA-NEXT:    xor a6, a3, a6
-; RV64IA-NEXT:  .LBB96_3: # in Loop: Header=BB96_1 Depth=1
+; RV64IA-NEXT:  .LBB106_3: # in Loop: Header=BB106_1 Depth=1
 ; RV64IA-NEXT:    sc.w a6, a6, (a2)
-; RV64IA-NEXT:    bnez a6, .LBB96_1
+; RV64IA-NEXT:    bnez a6, .LBB106_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -7905,9 +8541,9 @@ define i16 @atomicrmw_min_i16_release(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    slli a0, a1, 16
 ; RV32I-NEXT:    srai s2, a0, 16
-; RV32I-NEXT:    j .LBB97_2
-; RV32I-NEXT:  .LBB97_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB97_2 Depth=1
+; RV32I-NEXT:    j .LBB107_2
+; RV32I-NEXT:  .LBB107_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB107_2 Depth=1
 ; RV32I-NEXT:    sh a3, 14(sp)
 ; RV32I-NEXT:    addi a1, sp, 14
 ; RV32I-NEXT:    li a3, 3
@@ -7915,18 +8551,18 @@ define i16 @atomicrmw_min_i16_release(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
-; RV32I-NEXT:    bnez a0, .LBB97_4
-; RV32I-NEXT:  .LBB97_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB107_4
+; RV32I-NEXT:  .LBB107_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    slli a0, a3, 16
 ; RV32I-NEXT:    srai a0, a0, 16
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bge s2, a0, .LBB97_1
+; RV32I-NEXT:    bge s2, a0, .LBB107_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB97_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB107_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB97_1
-; RV32I-NEXT:  .LBB97_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB107_1
+; RV32I-NEXT:  .LBB107_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -7948,20 +8584,20 @@ define i16 @atomicrmw_min_i16_release(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    li a5, 16
 ; RV32IA-NEXT:    sub a5, a5, a3
-; RV32IA-NEXT:  .LBB97_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB107_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a3, (a2)
 ; RV32IA-NEXT:    and a7, a3, a4
 ; RV32IA-NEXT:    mv a6, a3
 ; RV32IA-NEXT:    sll a7, a7, a5
 ; RV32IA-NEXT:    sra a7, a7, a5
-; RV32IA-NEXT:    bge a1, a7, .LBB97_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB97_1 Depth=1
+; RV32IA-NEXT:    bge a1, a7, .LBB107_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB107_1 Depth=1
 ; RV32IA-NEXT:    xor a6, a3, a1
 ; RV32IA-NEXT:    and a6, a6, a4
 ; RV32IA-NEXT:    xor a6, a3, a6
-; RV32IA-NEXT:  .LBB97_3: # in Loop: Header=BB97_1 Depth=1
+; RV32IA-NEXT:  .LBB107_3: # in Loop: Header=BB107_1 Depth=1
 ; RV32IA-NEXT:    sc.w.rl a6, a6, (a2)
-; RV32IA-NEXT:    bnez a6, .LBB97_1
+; RV32IA-NEXT:    bnez a6, .LBB107_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -7978,9 +8614,9 @@ define i16 @atomicrmw_min_i16_release(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    slli a0, a1, 48
 ; RV64I-NEXT:    srai s2, a0, 48
-; RV64I-NEXT:    j .LBB97_2
-; RV64I-NEXT:  .LBB97_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB97_2 Depth=1
+; RV64I-NEXT:    j .LBB107_2
+; RV64I-NEXT:  .LBB107_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB107_2 Depth=1
 ; RV64I-NEXT:    sh a3, 14(sp)
 ; RV64I-NEXT:    addi a1, sp, 14
 ; RV64I-NEXT:    li a3, 3
@@ -7988,18 +8624,18 @@ define i16 @atomicrmw_min_i16_release(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
-; RV64I-NEXT:    bnez a0, .LBB97_4
-; RV64I-NEXT:  .LBB97_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB107_4
+; RV64I-NEXT:  .LBB107_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    slli a0, a3, 48
 ; RV64I-NEXT:    srai a0, a0, 48
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bge s2, a0, .LBB97_1
+; RV64I-NEXT:    bge s2, a0, .LBB107_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB97_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB107_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB97_1
-; RV64I-NEXT:  .LBB97_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB107_1
+; RV64I-NEXT:  .LBB107_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -8021,20 +8657,20 @@ define i16 @atomicrmw_min_i16_release(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    li a5, 48
 ; RV64IA-NEXT:    sub a5, a5, a3
-; RV64IA-NEXT:  .LBB97_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB107_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a3, (a2)
 ; RV64IA-NEXT:    and a7, a3, a4
 ; RV64IA-NEXT:    mv a6, a3
 ; RV64IA-NEXT:    sll a7, a7, a5
 ; RV64IA-NEXT:    sra a7, a7, a5
-; RV64IA-NEXT:    bge a1, a7, .LBB97_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB97_1 Depth=1
+; RV64IA-NEXT:    bge a1, a7, .LBB107_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB107_1 Depth=1
 ; RV64IA-NEXT:    xor a6, a3, a1
 ; RV64IA-NEXT:    and a6, a6, a4
 ; RV64IA-NEXT:    xor a6, a3, a6
-; RV64IA-NEXT:  .LBB97_3: # in Loop: Header=BB97_1 Depth=1
+; RV64IA-NEXT:  .LBB107_3: # in Loop: Header=BB107_1 Depth=1
 ; RV64IA-NEXT:    sc.w.rl a6, a6, (a2)
-; RV64IA-NEXT:    bnez a6, .LBB97_1
+; RV64IA-NEXT:    bnez a6, .LBB107_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -8055,9 +8691,9 @@ define i16 @atomicrmw_min_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    slli a0, a1, 16
 ; RV32I-NEXT:    srai s2, a0, 16
-; RV32I-NEXT:    j .LBB98_2
-; RV32I-NEXT:  .LBB98_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB98_2 Depth=1
+; RV32I-NEXT:    j .LBB108_2
+; RV32I-NEXT:  .LBB108_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB108_2 Depth=1
 ; RV32I-NEXT:    sh a3, 14(sp)
 ; RV32I-NEXT:    addi a1, sp, 14
 ; RV32I-NEXT:    li a3, 4
@@ -8065,18 +8701,18 @@ define i16 @atomicrmw_min_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
-; RV32I-NEXT:    bnez a0, .LBB98_4
-; RV32I-NEXT:  .LBB98_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB108_4
+; RV32I-NEXT:  .LBB108_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    slli a0, a3, 16
 ; RV32I-NEXT:    srai a0, a0, 16
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bge s2, a0, .LBB98_1
+; RV32I-NEXT:    bge s2, a0, .LBB108_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB98_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB108_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB98_1
-; RV32I-NEXT:  .LBB98_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB108_1
+; RV32I-NEXT:  .LBB108_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -8098,20 +8734,20 @@ define i16 @atomicrmw_min_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    li a5, 16
 ; RV32IA-NEXT:    sub a5, a5, a3
-; RV32IA-NEXT:  .LBB98_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB108_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a3, (a2)
 ; RV32IA-NEXT:    and a7, a3, a4
 ; RV32IA-NEXT:    mv a6, a3
 ; RV32IA-NEXT:    sll a7, a7, a5
 ; RV32IA-NEXT:    sra a7, a7, a5
-; RV32IA-NEXT:    bge a1, a7, .LBB98_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB98_1 Depth=1
+; RV32IA-NEXT:    bge a1, a7, .LBB108_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB108_1 Depth=1
 ; RV32IA-NEXT:    xor a6, a3, a1
 ; RV32IA-NEXT:    and a6, a6, a4
 ; RV32IA-NEXT:    xor a6, a3, a6
-; RV32IA-NEXT:  .LBB98_3: # in Loop: Header=BB98_1 Depth=1
+; RV32IA-NEXT:  .LBB108_3: # in Loop: Header=BB108_1 Depth=1
 ; RV32IA-NEXT:    sc.w.rl a6, a6, (a2)
-; RV32IA-NEXT:    bnez a6, .LBB98_1
+; RV32IA-NEXT:    bnez a6, .LBB108_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -8128,9 +8764,9 @@ define i16 @atomicrmw_min_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    slli a0, a1, 48
 ; RV64I-NEXT:    srai s2, a0, 48
-; RV64I-NEXT:    j .LBB98_2
-; RV64I-NEXT:  .LBB98_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB98_2 Depth=1
+; RV64I-NEXT:    j .LBB108_2
+; RV64I-NEXT:  .LBB108_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB108_2 Depth=1
 ; RV64I-NEXT:    sh a3, 14(sp)
 ; RV64I-NEXT:    addi a1, sp, 14
 ; RV64I-NEXT:    li a3, 4
@@ -8138,18 +8774,18 @@ define i16 @atomicrmw_min_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
-; RV64I-NEXT:    bnez a0, .LBB98_4
-; RV64I-NEXT:  .LBB98_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB108_4
+; RV64I-NEXT:  .LBB108_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    slli a0, a3, 48
 ; RV64I-NEXT:    srai a0, a0, 48
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bge s2, a0, .LBB98_1
+; RV64I-NEXT:    bge s2, a0, .LBB108_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB98_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB108_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB98_1
-; RV64I-NEXT:  .LBB98_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB108_1
+; RV64I-NEXT:  .LBB108_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -8171,20 +8807,20 @@ define i16 @atomicrmw_min_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    li a5, 48
 ; RV64IA-NEXT:    sub a5, a5, a3
-; RV64IA-NEXT:  .LBB98_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB108_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a3, (a2)
 ; RV64IA-NEXT:    and a7, a3, a4
 ; RV64IA-NEXT:    mv a6, a3
 ; RV64IA-NEXT:    sll a7, a7, a5
 ; RV64IA-NEXT:    sra a7, a7, a5
-; RV64IA-NEXT:    bge a1, a7, .LBB98_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB98_1 Depth=1
+; RV64IA-NEXT:    bge a1, a7, .LBB108_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB108_1 Depth=1
 ; RV64IA-NEXT:    xor a6, a3, a1
 ; RV64IA-NEXT:    and a6, a6, a4
 ; RV64IA-NEXT:    xor a6, a3, a6
-; RV64IA-NEXT:  .LBB98_3: # in Loop: Header=BB98_1 Depth=1
+; RV64IA-NEXT:  .LBB108_3: # in Loop: Header=BB108_1 Depth=1
 ; RV64IA-NEXT:    sc.w.rl a6, a6, (a2)
-; RV64IA-NEXT:    bnez a6, .LBB98_1
+; RV64IA-NEXT:    bnez a6, .LBB108_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -8205,9 +8841,9 @@ define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    slli a0, a1, 16
 ; RV32I-NEXT:    srai s2, a0, 16
-; RV32I-NEXT:    j .LBB99_2
-; RV32I-NEXT:  .LBB99_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB99_2 Depth=1
+; RV32I-NEXT:    j .LBB109_2
+; RV32I-NEXT:  .LBB109_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB109_2 Depth=1
 ; RV32I-NEXT:    sh a3, 14(sp)
 ; RV32I-NEXT:    addi a1, sp, 14
 ; RV32I-NEXT:    li a3, 5
@@ -8215,18 +8851,18 @@ define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
-; RV32I-NEXT:    bnez a0, .LBB99_4
-; RV32I-NEXT:  .LBB99_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB109_4
+; RV32I-NEXT:  .LBB109_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    slli a0, a3, 16
 ; RV32I-NEXT:    srai a0, a0, 16
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bge s2, a0, .LBB99_1
+; RV32I-NEXT:    bge s2, a0, .LBB109_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB99_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB109_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB99_1
-; RV32I-NEXT:  .LBB99_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB109_1
+; RV32I-NEXT:  .LBB109_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -8248,20 +8884,20 @@ define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    li a5, 16
 ; RV32IA-NEXT:    sub a5, a5, a3
-; RV32IA-NEXT:  .LBB99_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB109_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aqrl a3, (a2)
 ; RV32IA-NEXT:    and a7, a3, a4
 ; RV32IA-NEXT:    mv a6, a3
 ; RV32IA-NEXT:    sll a7, a7, a5
 ; RV32IA-NEXT:    sra a7, a7, a5
-; RV32IA-NEXT:    bge a1, a7, .LBB99_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB99_1 Depth=1
+; RV32IA-NEXT:    bge a1, a7, .LBB109_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB109_1 Depth=1
 ; RV32IA-NEXT:    xor a6, a3, a1
 ; RV32IA-NEXT:    and a6, a6, a4
 ; RV32IA-NEXT:    xor a6, a3, a6
-; RV32IA-NEXT:  .LBB99_3: # in Loop: Header=BB99_1 Depth=1
+; RV32IA-NEXT:  .LBB109_3: # in Loop: Header=BB109_1 Depth=1
 ; RV32IA-NEXT:    sc.w.rl a6, a6, (a2)
-; RV32IA-NEXT:    bnez a6, .LBB99_1
+; RV32IA-NEXT:    bnez a6, .LBB109_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -8278,9 +8914,9 @@ define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    slli a0, a1, 48
 ; RV64I-NEXT:    srai s2, a0, 48
-; RV64I-NEXT:    j .LBB99_2
-; RV64I-NEXT:  .LBB99_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB99_2 Depth=1
+; RV64I-NEXT:    j .LBB109_2
+; RV64I-NEXT:  .LBB109_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB109_2 Depth=1
 ; RV64I-NEXT:    sh a3, 14(sp)
 ; RV64I-NEXT:    addi a1, sp, 14
 ; RV64I-NEXT:    li a3, 5
@@ -8288,18 +8924,18 @@ define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
-; RV64I-NEXT:    bnez a0, .LBB99_4
-; RV64I-NEXT:  .LBB99_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB109_4
+; RV64I-NEXT:  .LBB109_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    slli a0, a3, 48
 ; RV64I-NEXT:    srai a0, a0, 48
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bge s2, a0, .LBB99_1
+; RV64I-NEXT:    bge s2, a0, .LBB109_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB99_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB109_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB99_1
-; RV64I-NEXT:  .LBB99_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB109_1
+; RV64I-NEXT:  .LBB109_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -8321,20 +8957,20 @@ define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    li a5, 48
 ; RV64IA-NEXT:    sub a5, a5, a3
-; RV64IA-NEXT:  .LBB99_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB109_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aqrl a3, (a2)
 ; RV64IA-NEXT:    and a7, a3, a4
 ; RV64IA-NEXT:    mv a6, a3
 ; RV64IA-NEXT:    sll a7, a7, a5
 ; RV64IA-NEXT:    sra a7, a7, a5
-; RV64IA-NEXT:    bge a1, a7, .LBB99_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB99_1 Depth=1
+; RV64IA-NEXT:    bge a1, a7, .LBB109_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB109_1 Depth=1
 ; RV64IA-NEXT:    xor a6, a3, a1
 ; RV64IA-NEXT:    and a6, a6, a4
 ; RV64IA-NEXT:    xor a6, a3, a6
-; RV64IA-NEXT:  .LBB99_3: # in Loop: Header=BB99_1 Depth=1
+; RV64IA-NEXT:  .LBB109_3: # in Loop: Header=BB109_1 Depth=1
 ; RV64IA-NEXT:    sc.w.rl a6, a6, (a2)
-; RV64IA-NEXT:    bnez a6, .LBB99_1
+; RV64IA-NEXT:    bnez a6, .LBB109_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -8357,9 +8993,9 @@ define i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    lui s2, 16
 ; RV32I-NEXT:    addi s2, s2, -1
 ; RV32I-NEXT:    and s3, s0, s2
-; RV32I-NEXT:    j .LBB100_2
-; RV32I-NEXT:  .LBB100_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB100_2 Depth=1
+; RV32I-NEXT:    j .LBB110_2
+; RV32I-NEXT:  .LBB110_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB110_2 Depth=1
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    mv a0, s1
@@ -8367,17 +9003,17 @@ define i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
-; RV32I-NEXT:    bnez a0, .LBB100_4
-; RV32I-NEXT:  .LBB100_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB110_4
+; RV32I-NEXT:  .LBB110_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    and a0, a1, s2
 ; RV32I-NEXT:    mv a2, a1
-; RV32I-NEXT:    bltu s3, a0, .LBB100_1
+; RV32I-NEXT:    bltu s3, a0, .LBB110_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB100_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB110_2 Depth=1
 ; RV32I-NEXT:    mv a2, s0
-; RV32I-NEXT:    j .LBB100_1
-; RV32I-NEXT:  .LBB100_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB110_1
+; RV32I-NEXT:  .LBB110_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -8396,18 +9032,18 @@ define i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a4, a3, a0
 ; RV32IA-NEXT:    and a1, a1, a3
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB100_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB110_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a3, (a2)
 ; RV32IA-NEXT:    and a6, a3, a4
 ; RV32IA-NEXT:    mv a5, a3
-; RV32IA-NEXT:    bgeu a6, a1, .LBB100_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB100_1 Depth=1
+; RV32IA-NEXT:    bgeu a6, a1, .LBB110_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB110_1 Depth=1
 ; RV32IA-NEXT:    xor a5, a3, a1
 ; RV32IA-NEXT:    and a5, a5, a4
 ; RV32IA-NEXT:    xor a5, a3, a5
-; RV32IA-NEXT:  .LBB100_3: # in Loop: Header=BB100_1 Depth=1
+; RV32IA-NEXT:  .LBB110_3: # in Loop: Header=BB110_1 Depth=1
 ; RV32IA-NEXT:    sc.w a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB100_1
+; RV32IA-NEXT:    bnez a5, .LBB110_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -8426,9 +9062,9 @@ define i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    lui s2, 16
 ; RV64I-NEXT:    addiw s2, s2, -1
 ; RV64I-NEXT:    and s3, s0, s2
-; RV64I-NEXT:    j .LBB100_2
-; RV64I-NEXT:  .LBB100_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB100_2 Depth=1
+; RV64I-NEXT:    j .LBB110_2
+; RV64I-NEXT:  .LBB110_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB110_2 Depth=1
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    mv a0, s1
@@ -8436,17 +9072,17 @@ define i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
-; RV64I-NEXT:    bnez a0, .LBB100_4
-; RV64I-NEXT:  .LBB100_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB110_4
+; RV64I-NEXT:  .LBB110_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    and a0, a1, s2
 ; RV64I-NEXT:    mv a2, a1
-; RV64I-NEXT:    bltu s3, a0, .LBB100_1
+; RV64I-NEXT:    bltu s3, a0, .LBB110_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB100_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB110_2 Depth=1
 ; RV64I-NEXT:    mv a2, s0
-; RV64I-NEXT:    j .LBB100_1
-; RV64I-NEXT:  .LBB100_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB110_1
+; RV64I-NEXT:  .LBB110_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -8465,18 +9101,18 @@ define i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a4, a3, a0
 ; RV64IA-NEXT:    and a1, a1, a3
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB100_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB110_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a3, (a2)
 ; RV64IA-NEXT:    and a6, a3, a4
 ; RV64IA-NEXT:    mv a5, a3
-; RV64IA-NEXT:    bgeu a6, a1, .LBB100_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB100_1 Depth=1
+; RV64IA-NEXT:    bgeu a6, a1, .LBB110_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB110_1 Depth=1
 ; RV64IA-NEXT:    xor a5, a3, a1
 ; RV64IA-NEXT:    and a5, a5, a4
 ; RV64IA-NEXT:    xor a5, a3, a5
-; RV64IA-NEXT:  .LBB100_3: # in Loop: Header=BB100_1 Depth=1
+; RV64IA-NEXT:  .LBB110_3: # in Loop: Header=BB110_1 Depth=1
 ; RV64IA-NEXT:    sc.w a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB100_1
+; RV64IA-NEXT:    bnez a5, .LBB110_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -8498,10 +9134,10 @@ define i16 @atomicrmw_umax_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    lhu a1, 0(a0)
 ; RV32I-NEXT:    lui s2, 16
 ; RV32I-NEXT:    addi s2, s2, -1
-; RV32I-NEXT:    and s3, s0, s2
-; RV32I-NEXT:    j .LBB101_2
-; RV32I-NEXT:  .LBB101_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB101_2 Depth=1
+; RV32I-NEXT:    and s3, s0, s2
+; RV32I-NEXT:    j .LBB111_2
+; RV32I-NEXT:  .LBB111_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB111_2 Depth=1
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    li a3, 2
@@ -8509,17 +9145,17 @@ define i16 @atomicrmw_umax_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
-; RV32I-NEXT:    bnez a0, .LBB101_4
-; RV32I-NEXT:  .LBB101_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB111_4
+; RV32I-NEXT:  .LBB111_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    and a0, a1, s2
 ; RV32I-NEXT:    mv a2, a1
-; RV32I-NEXT:    bltu s3, a0, .LBB101_1
+; RV32I-NEXT:    bltu s3, a0, .LBB111_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB101_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB111_2 Depth=1
 ; RV32I-NEXT:    mv a2, s0
-; RV32I-NEXT:    j .LBB101_1
-; RV32I-NEXT:  .LBB101_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB111_1
+; RV32I-NEXT:  .LBB111_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -8538,18 +9174,18 @@ define i16 @atomicrmw_umax_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a4, a3, a0
 ; RV32IA-NEXT:    and a1, a1, a3
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB101_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB111_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a3, (a2)
 ; RV32IA-NEXT:    and a6, a3, a4
 ; RV32IA-NEXT:    mv a5, a3
-; RV32IA-NEXT:    bgeu a6, a1, .LBB101_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB101_1 Depth=1
+; RV32IA-NEXT:    bgeu a6, a1, .LBB111_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB111_1 Depth=1
 ; RV32IA-NEXT:    xor a5, a3, a1
 ; RV32IA-NEXT:    and a5, a5, a4
 ; RV32IA-NEXT:    xor a5, a3, a5
-; RV32IA-NEXT:  .LBB101_3: # in Loop: Header=BB101_1 Depth=1
+; RV32IA-NEXT:  .LBB111_3: # in Loop: Header=BB111_1 Depth=1
 ; RV32IA-NEXT:    sc.w a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB101_1
+; RV32IA-NEXT:    bnez a5, .LBB111_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -8568,9 +9204,9 @@ define i16 @atomicrmw_umax_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    lui s2, 16
 ; RV64I-NEXT:    addiw s2, s2, -1
 ; RV64I-NEXT:    and s3, s0, s2
-; RV64I-NEXT:    j .LBB101_2
-; RV64I-NEXT:  .LBB101_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB101_2 Depth=1
+; RV64I-NEXT:    j .LBB111_2
+; RV64I-NEXT:  .LBB111_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB111_2 Depth=1
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    li a3, 2
@@ -8578,17 +9214,17 @@ define i16 @atomicrmw_umax_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
-; RV64I-NEXT:    bnez a0, .LBB101_4
-; RV64I-NEXT:  .LBB101_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB111_4
+; RV64I-NEXT:  .LBB111_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    and a0, a1, s2
 ; RV64I-NEXT:    mv a2, a1
-; RV64I-NEXT:    bltu s3, a0, .LBB101_1
+; RV64I-NEXT:    bltu s3, a0, .LBB111_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB101_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB111_2 Depth=1
 ; RV64I-NEXT:    mv a2, s0
-; RV64I-NEXT:    j .LBB101_1
-; RV64I-NEXT:  .LBB101_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB111_1
+; RV64I-NEXT:  .LBB111_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -8607,18 +9243,18 @@ define i16 @atomicrmw_umax_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a4, a3, a0
 ; RV64IA-NEXT:    and a1, a1, a3
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB101_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB111_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a3, (a2)
 ; RV64IA-NEXT:    and a6, a3, a4
 ; RV64IA-NEXT:    mv a5, a3
-; RV64IA-NEXT:    bgeu a6, a1, .LBB101_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB101_1 Depth=1
+; RV64IA-NEXT:    bgeu a6, a1, .LBB111_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB111_1 Depth=1
 ; RV64IA-NEXT:    xor a5, a3, a1
 ; RV64IA-NEXT:    and a5, a5, a4
 ; RV64IA-NEXT:    xor a5, a3, a5
-; RV64IA-NEXT:  .LBB101_3: # in Loop: Header=BB101_1 Depth=1
+; RV64IA-NEXT:  .LBB111_3: # in Loop: Header=BB111_1 Depth=1
 ; RV64IA-NEXT:    sc.w a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB101_1
+; RV64IA-NEXT:    bnez a5, .LBB111_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -8641,9 +9277,9 @@ define i16 @atomicrmw_umax_i16_release(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    lui s2, 16
 ; RV32I-NEXT:    addi s2, s2, -1
 ; RV32I-NEXT:    and s3, s0, s2
-; RV32I-NEXT:    j .LBB102_2
-; RV32I-NEXT:  .LBB102_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB102_2 Depth=1
+; RV32I-NEXT:    j .LBB112_2
+; RV32I-NEXT:  .LBB112_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB112_2 Depth=1
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    li a3, 3
@@ -8651,17 +9287,17 @@ define i16 @atomicrmw_umax_i16_release(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
-; RV32I-NEXT:    bnez a0, .LBB102_4
-; RV32I-NEXT:  .LBB102_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB112_4
+; RV32I-NEXT:  .LBB112_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    and a0, a1, s2
 ; RV32I-NEXT:    mv a2, a1
-; RV32I-NEXT:    bltu s3, a0, .LBB102_1
+; RV32I-NEXT:    bltu s3, a0, .LBB112_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB102_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB112_2 Depth=1
 ; RV32I-NEXT:    mv a2, s0
-; RV32I-NEXT:    j .LBB102_1
-; RV32I-NEXT:  .LBB102_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB112_1
+; RV32I-NEXT:  .LBB112_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -8680,18 +9316,18 @@ define i16 @atomicrmw_umax_i16_release(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a4, a3, a0
 ; RV32IA-NEXT:    and a1, a1, a3
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB102_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB112_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a3, (a2)
 ; RV32IA-NEXT:    and a6, a3, a4
 ; RV32IA-NEXT:    mv a5, a3
-; RV32IA-NEXT:    bgeu a6, a1, .LBB102_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB102_1 Depth=1
+; RV32IA-NEXT:    bgeu a6, a1, .LBB112_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB112_1 Depth=1
 ; RV32IA-NEXT:    xor a5, a3, a1
 ; RV32IA-NEXT:    and a5, a5, a4
 ; RV32IA-NEXT:    xor a5, a3, a5
-; RV32IA-NEXT:  .LBB102_3: # in Loop: Header=BB102_1 Depth=1
+; RV32IA-NEXT:  .LBB112_3: # in Loop: Header=BB112_1 Depth=1
 ; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB102_1
+; RV32IA-NEXT:    bnez a5, .LBB112_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -8710,9 +9346,9 @@ define i16 @atomicrmw_umax_i16_release(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    lui s2, 16
 ; RV64I-NEXT:    addiw s2, s2, -1
 ; RV64I-NEXT:    and s3, s0, s2
-; RV64I-NEXT:    j .LBB102_2
-; RV64I-NEXT:  .LBB102_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB102_2 Depth=1
+; RV64I-NEXT:    j .LBB112_2
+; RV64I-NEXT:  .LBB112_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB112_2 Depth=1
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    li a3, 3
@@ -8720,17 +9356,17 @@ define i16 @atomicrmw_umax_i16_release(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
-; RV64I-NEXT:    bnez a0, .LBB102_4
-; RV64I-NEXT:  .LBB102_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB112_4
+; RV64I-NEXT:  .LBB112_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    and a0, a1, s2
 ; RV64I-NEXT:    mv a2, a1
-; RV64I-NEXT:    bltu s3, a0, .LBB102_1
+; RV64I-NEXT:    bltu s3, a0, .LBB112_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB102_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB112_2 Depth=1
 ; RV64I-NEXT:    mv a2, s0
-; RV64I-NEXT:    j .LBB102_1
-; RV64I-NEXT:  .LBB102_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB112_1
+; RV64I-NEXT:  .LBB112_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -8749,18 +9385,18 @@ define i16 @atomicrmw_umax_i16_release(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a4, a3, a0
 ; RV64IA-NEXT:    and a1, a1, a3
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB102_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB112_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a3, (a2)
 ; RV64IA-NEXT:    and a6, a3, a4
 ; RV64IA-NEXT:    mv a5, a3
-; RV64IA-NEXT:    bgeu a6, a1, .LBB102_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB102_1 Depth=1
+; RV64IA-NEXT:    bgeu a6, a1, .LBB112_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB112_1 Depth=1
 ; RV64IA-NEXT:    xor a5, a3, a1
 ; RV64IA-NEXT:    and a5, a5, a4
 ; RV64IA-NEXT:    xor a5, a3, a5
-; RV64IA-NEXT:  .LBB102_3: # in Loop: Header=BB102_1 Depth=1
+; RV64IA-NEXT:  .LBB112_3: # in Loop: Header=BB112_1 Depth=1
 ; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB102_1
+; RV64IA-NEXT:    bnez a5, .LBB112_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -8783,9 +9419,9 @@ define i16 @atomicrmw_umax_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    lui s2, 16
 ; RV32I-NEXT:    addi s2, s2, -1
 ; RV32I-NEXT:    and s3, s0, s2
-; RV32I-NEXT:    j .LBB103_2
-; RV32I-NEXT:  .LBB103_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB103_2 Depth=1
+; RV32I-NEXT:    j .LBB113_2
+; RV32I-NEXT:  .LBB113_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB113_2 Depth=1
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    li a3, 4
@@ -8793,17 +9429,17 @@ define i16 @atomicrmw_umax_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
-; RV32I-NEXT:    bnez a0, .LBB103_4
-; RV32I-NEXT:  .LBB103_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB113_4
+; RV32I-NEXT:  .LBB113_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    and a0, a1, s2
 ; RV32I-NEXT:    mv a2, a1
-; RV32I-NEXT:    bltu s3, a0, .LBB103_1
+; RV32I-NEXT:    bltu s3, a0, .LBB113_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB103_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB113_2 Depth=1
 ; RV32I-NEXT:    mv a2, s0
-; RV32I-NEXT:    j .LBB103_1
-; RV32I-NEXT:  .LBB103_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB113_1
+; RV32I-NEXT:  .LBB113_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -8822,18 +9458,18 @@ define i16 @atomicrmw_umax_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a4, a3, a0
 ; RV32IA-NEXT:    and a1, a1, a3
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB103_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB113_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a3, (a2)
 ; RV32IA-NEXT:    and a6, a3, a4
 ; RV32IA-NEXT:    mv a5, a3
-; RV32IA-NEXT:    bgeu a6, a1, .LBB103_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB103_1 Depth=1
+; RV32IA-NEXT:    bgeu a6, a1, .LBB113_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB113_1 Depth=1
 ; RV32IA-NEXT:    xor a5, a3, a1
 ; RV32IA-NEXT:    and a5, a5, a4
 ; RV32IA-NEXT:    xor a5, a3, a5
-; RV32IA-NEXT:  .LBB103_3: # in Loop: Header=BB103_1 Depth=1
+; RV32IA-NEXT:  .LBB113_3: # in Loop: Header=BB113_1 Depth=1
 ; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB103_1
+; RV32IA-NEXT:    bnez a5, .LBB113_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -8852,9 +9488,9 @@ define i16 @atomicrmw_umax_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    lui s2, 16
 ; RV64I-NEXT:    addiw s2, s2, -1
 ; RV64I-NEXT:    and s3, s0, s2
-; RV64I-NEXT:    j .LBB103_2
-; RV64I-NEXT:  .LBB103_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB103_2 Depth=1
+; RV64I-NEXT:    j .LBB113_2
+; RV64I-NEXT:  .LBB113_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB113_2 Depth=1
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    li a3, 4
@@ -8862,17 +9498,17 @@ define i16 @atomicrmw_umax_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
-; RV64I-NEXT:    bnez a0, .LBB103_4
-; RV64I-NEXT:  .LBB103_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB113_4
+; RV64I-NEXT:  .LBB113_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    and a0, a1, s2
 ; RV64I-NEXT:    mv a2, a1
-; RV64I-NEXT:    bltu s3, a0, .LBB103_1
+; RV64I-NEXT:    bltu s3, a0, .LBB113_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB103_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB113_2 Depth=1
 ; RV64I-NEXT:    mv a2, s0
-; RV64I-NEXT:    j .LBB103_1
-; RV64I-NEXT:  .LBB103_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB113_1
+; RV64I-NEXT:  .LBB113_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -8891,18 +9527,18 @@ define i16 @atomicrmw_umax_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a4, a3, a0
 ; RV64IA-NEXT:    and a1, a1, a3
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB103_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB113_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a3, (a2)
 ; RV64IA-NEXT:    and a6, a3, a4
 ; RV64IA-NEXT:    mv a5, a3
-; RV64IA-NEXT:    bgeu a6, a1, .LBB103_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB103_1 Depth=1
+; RV64IA-NEXT:    bgeu a6, a1, .LBB113_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB113_1 Depth=1
 ; RV64IA-NEXT:    xor a5, a3, a1
 ; RV64IA-NEXT:    and a5, a5, a4
 ; RV64IA-NEXT:    xor a5, a3, a5
-; RV64IA-NEXT:  .LBB103_3: # in Loop: Header=BB103_1 Depth=1
+; RV64IA-NEXT:  .LBB113_3: # in Loop: Header=BB113_1 Depth=1
 ; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB103_1
+; RV64IA-NEXT:    bnez a5, .LBB113_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -8925,9 +9561,9 @@ define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    lui s2, 16
 ; RV32I-NEXT:    addi s2, s2, -1
 ; RV32I-NEXT:    and s3, s0, s2
-; RV32I-NEXT:    j .LBB104_2
-; RV32I-NEXT:  .LBB104_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB104_2 Depth=1
+; RV32I-NEXT:    j .LBB114_2
+; RV32I-NEXT:  .LBB114_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB114_2 Depth=1
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    li a3, 5
@@ -8935,17 +9571,17 @@ define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
-; RV32I-NEXT:    bnez a0, .LBB104_4
-; RV32I-NEXT:  .LBB104_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB114_4
+; RV32I-NEXT:  .LBB114_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    and a0, a1, s2
 ; RV32I-NEXT:    mv a2, a1
-; RV32I-NEXT:    bltu s3, a0, .LBB104_1
+; RV32I-NEXT:    bltu s3, a0, .LBB114_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB104_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB114_2 Depth=1
 ; RV32I-NEXT:    mv a2, s0
-; RV32I-NEXT:    j .LBB104_1
-; RV32I-NEXT:  .LBB104_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB114_1
+; RV32I-NEXT:  .LBB114_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -8964,18 +9600,18 @@ define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a4, a3, a0
 ; RV32IA-NEXT:    and a1, a1, a3
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB104_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB114_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aqrl a3, (a2)
 ; RV32IA-NEXT:    and a6, a3, a4
 ; RV32IA-NEXT:    mv a5, a3
-; RV32IA-NEXT:    bgeu a6, a1, .LBB104_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB104_1 Depth=1
+; RV32IA-NEXT:    bgeu a6, a1, .LBB114_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB114_1 Depth=1
 ; RV32IA-NEXT:    xor a5, a3, a1
 ; RV32IA-NEXT:    and a5, a5, a4
 ; RV32IA-NEXT:    xor a5, a3, a5
-; RV32IA-NEXT:  .LBB104_3: # in Loop: Header=BB104_1 Depth=1
+; RV32IA-NEXT:  .LBB114_3: # in Loop: Header=BB114_1 Depth=1
 ; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB104_1
+; RV32IA-NEXT:    bnez a5, .LBB114_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -8994,9 +9630,9 @@ define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    lui s2, 16
 ; RV64I-NEXT:    addiw s2, s2, -1
 ; RV64I-NEXT:    and s3, s0, s2
-; RV64I-NEXT:    j .LBB104_2
-; RV64I-NEXT:  .LBB104_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB104_2 Depth=1
+; RV64I-NEXT:    j .LBB114_2
+; RV64I-NEXT:  .LBB114_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB114_2 Depth=1
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    li a3, 5
@@ -9004,17 +9640,17 @@ define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
-; RV64I-NEXT:    bnez a0, .LBB104_4
-; RV64I-NEXT:  .LBB104_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB114_4
+; RV64I-NEXT:  .LBB114_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    and a0, a1, s2
 ; RV64I-NEXT:    mv a2, a1
-; RV64I-NEXT:    bltu s3, a0, .LBB104_1
+; RV64I-NEXT:    bltu s3, a0, .LBB114_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB104_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB114_2 Depth=1
 ; RV64I-NEXT:    mv a2, s0
-; RV64I-NEXT:    j .LBB104_1
-; RV64I-NEXT:  .LBB104_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB114_1
+; RV64I-NEXT:  .LBB114_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -9033,18 +9669,18 @@ define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a4, a3, a0
 ; RV64IA-NEXT:    and a1, a1, a3
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB104_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB114_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aqrl a3, (a2)
 ; RV64IA-NEXT:    and a6, a3, a4
 ; RV64IA-NEXT:    mv a5, a3
-; RV64IA-NEXT:    bgeu a6, a1, .LBB104_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB104_1 Depth=1
+; RV64IA-NEXT:    bgeu a6, a1, .LBB114_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB114_1 Depth=1
 ; RV64IA-NEXT:    xor a5, a3, a1
 ; RV64IA-NEXT:    and a5, a5, a4
 ; RV64IA-NEXT:    xor a5, a3, a5
-; RV64IA-NEXT:  .LBB104_3: # in Loop: Header=BB104_1 Depth=1
+; RV64IA-NEXT:  .LBB114_3: # in Loop: Header=BB114_1 Depth=1
 ; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB104_1
+; RV64IA-NEXT:    bnez a5, .LBB114_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -9067,9 +9703,9 @@ define i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    lui s2, 16
 ; RV32I-NEXT:    addi s2, s2, -1
 ; RV32I-NEXT:    and s3, s0, s2
-; RV32I-NEXT:    j .LBB105_2
-; RV32I-NEXT:  .LBB105_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB105_2 Depth=1
+; RV32I-NEXT:    j .LBB115_2
+; RV32I-NEXT:  .LBB115_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB115_2 Depth=1
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    mv a0, s1
@@ -9077,17 +9713,17 @@ define i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
-; RV32I-NEXT:    bnez a0, .LBB105_4
-; RV32I-NEXT:  .LBB105_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB115_4
+; RV32I-NEXT:  .LBB115_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    and a0, a1, s2
 ; RV32I-NEXT:    mv a2, a1
-; RV32I-NEXT:    bgeu s3, a0, .LBB105_1
+; RV32I-NEXT:    bgeu s3, a0, .LBB115_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB105_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB115_2 Depth=1
 ; RV32I-NEXT:    mv a2, s0
-; RV32I-NEXT:    j .LBB105_1
-; RV32I-NEXT:  .LBB105_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB115_1
+; RV32I-NEXT:  .LBB115_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -9106,18 +9742,18 @@ define i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a4, a3, a0
 ; RV32IA-NEXT:    and a1, a1, a3
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB105_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB115_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a3, (a2)
 ; RV32IA-NEXT:    and a6, a3, a4
 ; RV32IA-NEXT:    mv a5, a3
-; RV32IA-NEXT:    bgeu a1, a6, .LBB105_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB105_1 Depth=1
+; RV32IA-NEXT:    bgeu a1, a6, .LBB115_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB115_1 Depth=1
 ; RV32IA-NEXT:    xor a5, a3, a1
 ; RV32IA-NEXT:    and a5, a5, a4
 ; RV32IA-NEXT:    xor a5, a3, a5
-; RV32IA-NEXT:  .LBB105_3: # in Loop: Header=BB105_1 Depth=1
+; RV32IA-NEXT:  .LBB115_3: # in Loop: Header=BB115_1 Depth=1
 ; RV32IA-NEXT:    sc.w a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB105_1
+; RV32IA-NEXT:    bnez a5, .LBB115_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -9136,9 +9772,9 @@ define i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    lui s2, 16
 ; RV64I-NEXT:    addiw s2, s2, -1
 ; RV64I-NEXT:    and s3, s0, s2
-; RV64I-NEXT:    j .LBB105_2
-; RV64I-NEXT:  .LBB105_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB105_2 Depth=1
+; RV64I-NEXT:    j .LBB115_2
+; RV64I-NEXT:  .LBB115_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB115_2 Depth=1
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    mv a0, s1
@@ -9146,17 +9782,17 @@ define i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
-; RV64I-NEXT:    bnez a0, .LBB105_4
-; RV64I-NEXT:  .LBB105_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB115_4
+; RV64I-NEXT:  .LBB115_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    and a0, a1, s2
 ; RV64I-NEXT:    mv a2, a1
-; RV64I-NEXT:    bgeu s3, a0, .LBB105_1
+; RV64I-NEXT:    bgeu s3, a0, .LBB115_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB105_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB115_2 Depth=1
 ; RV64I-NEXT:    mv a2, s0
-; RV64I-NEXT:    j .LBB105_1
-; RV64I-NEXT:  .LBB105_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB115_1
+; RV64I-NEXT:  .LBB115_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -9175,18 +9811,18 @@ define i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a4, a3, a0
 ; RV64IA-NEXT:    and a1, a1, a3
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB105_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB115_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a3, (a2)
 ; RV64IA-NEXT:    and a6, a3, a4
 ; RV64IA-NEXT:    mv a5, a3
-; RV64IA-NEXT:    bgeu a1, a6, .LBB105_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB105_1 Depth=1
+; RV64IA-NEXT:    bgeu a1, a6, .LBB115_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB115_1 Depth=1
 ; RV64IA-NEXT:    xor a5, a3, a1
 ; RV64IA-NEXT:    and a5, a5, a4
 ; RV64IA-NEXT:    xor a5, a3, a5
-; RV64IA-NEXT:  .LBB105_3: # in Loop: Header=BB105_1 Depth=1
+; RV64IA-NEXT:  .LBB115_3: # in Loop: Header=BB115_1 Depth=1
 ; RV64IA-NEXT:    sc.w a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB105_1
+; RV64IA-NEXT:    bnez a5, .LBB115_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -9209,9 +9845,9 @@ define i16 @atomicrmw_umin_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    lui s2, 16
 ; RV32I-NEXT:    addi s2, s2, -1
 ; RV32I-NEXT:    and s3, s0, s2
-; RV32I-NEXT:    j .LBB106_2
-; RV32I-NEXT:  .LBB106_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB106_2 Depth=1
+; RV32I-NEXT:    j .LBB116_2
+; RV32I-NEXT:  .LBB116_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB116_2 Depth=1
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    li a3, 2
@@ -9219,17 +9855,17 @@ define i16 @atomicrmw_umin_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
-; RV32I-NEXT:    bnez a0, .LBB106_4
-; RV32I-NEXT:  .LBB106_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB116_4
+; RV32I-NEXT:  .LBB116_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    and a0, a1, s2
 ; RV32I-NEXT:    mv a2, a1
-; RV32I-NEXT:    bgeu s3, a0, .LBB106_1
+; RV32I-NEXT:    bgeu s3, a0, .LBB116_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB106_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB116_2 Depth=1
 ; RV32I-NEXT:    mv a2, s0
-; RV32I-NEXT:    j .LBB106_1
-; RV32I-NEXT:  .LBB106_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB116_1
+; RV32I-NEXT:  .LBB116_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -9248,18 +9884,18 @@ define i16 @atomicrmw_umin_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a4, a3, a0
 ; RV32IA-NEXT:    and a1, a1, a3
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB106_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB116_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a3, (a2)
 ; RV32IA-NEXT:    and a6, a3, a4
 ; RV32IA-NEXT:    mv a5, a3
-; RV32IA-NEXT:    bgeu a1, a6, .LBB106_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB106_1 Depth=1
+; RV32IA-NEXT:    bgeu a1, a6, .LBB116_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB116_1 Depth=1
 ; RV32IA-NEXT:    xor a5, a3, a1
 ; RV32IA-NEXT:    and a5, a5, a4
 ; RV32IA-NEXT:    xor a5, a3, a5
-; RV32IA-NEXT:  .LBB106_3: # in Loop: Header=BB106_1 Depth=1
+; RV32IA-NEXT:  .LBB116_3: # in Loop: Header=BB116_1 Depth=1
 ; RV32IA-NEXT:    sc.w a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB106_1
+; RV32IA-NEXT:    bnez a5, .LBB116_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -9278,9 +9914,9 @@ define i16 @atomicrmw_umin_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    lui s2, 16
 ; RV64I-NEXT:    addiw s2, s2, -1
 ; RV64I-NEXT:    and s3, s0, s2
-; RV64I-NEXT:    j .LBB106_2
-; RV64I-NEXT:  .LBB106_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB106_2 Depth=1
+; RV64I-NEXT:    j .LBB116_2
+; RV64I-NEXT:  .LBB116_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB116_2 Depth=1
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    li a3, 2
@@ -9288,17 +9924,17 @@ define i16 @atomicrmw_umin_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
-; RV64I-NEXT:    bnez a0, .LBB106_4
-; RV64I-NEXT:  .LBB106_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB116_4
+; RV64I-NEXT:  .LBB116_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    and a0, a1, s2
 ; RV64I-NEXT:    mv a2, a1
-; RV64I-NEXT:    bgeu s3, a0, .LBB106_1
+; RV64I-NEXT:    bgeu s3, a0, .LBB116_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB106_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB116_2 Depth=1
 ; RV64I-NEXT:    mv a2, s0
-; RV64I-NEXT:    j .LBB106_1
-; RV64I-NEXT:  .LBB106_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB116_1
+; RV64I-NEXT:  .LBB116_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -9317,18 +9953,18 @@ define i16 @atomicrmw_umin_i16_acquire(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a4, a3, a0
 ; RV64IA-NEXT:    and a1, a1, a3
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB106_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB116_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a3, (a2)
 ; RV64IA-NEXT:    and a6, a3, a4
 ; RV64IA-NEXT:    mv a5, a3
-; RV64IA-NEXT:    bgeu a1, a6, .LBB106_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB106_1 Depth=1
+; RV64IA-NEXT:    bgeu a1, a6, .LBB116_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB116_1 Depth=1
 ; RV64IA-NEXT:    xor a5, a3, a1
 ; RV64IA-NEXT:    and a5, a5, a4
 ; RV64IA-NEXT:    xor a5, a3, a5
-; RV64IA-NEXT:  .LBB106_3: # in Loop: Header=BB106_1 Depth=1
+; RV64IA-NEXT:  .LBB116_3: # in Loop: Header=BB116_1 Depth=1
 ; RV64IA-NEXT:    sc.w a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB106_1
+; RV64IA-NEXT:    bnez a5, .LBB116_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -9351,9 +9987,9 @@ define i16 @atomicrmw_umin_i16_release(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    lui s2, 16
 ; RV32I-NEXT:    addi s2, s2, -1
 ; RV32I-NEXT:    and s3, s0, s2
-; RV32I-NEXT:    j .LBB107_2
-; RV32I-NEXT:  .LBB107_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB107_2 Depth=1
+; RV32I-NEXT:    j .LBB117_2
+; RV32I-NEXT:  .LBB117_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB117_2 Depth=1
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    li a3, 3
@@ -9361,17 +9997,17 @@ define i16 @atomicrmw_umin_i16_release(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
-; RV32I-NEXT:    bnez a0, .LBB107_4
-; RV32I-NEXT:  .LBB107_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB117_4
+; RV32I-NEXT:  .LBB117_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    and a0, a1, s2
 ; RV32I-NEXT:    mv a2, a1
-; RV32I-NEXT:    bgeu s3, a0, .LBB107_1
+; RV32I-NEXT:    bgeu s3, a0, .LBB117_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB107_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB117_2 Depth=1
 ; RV32I-NEXT:    mv a2, s0
-; RV32I-NEXT:    j .LBB107_1
-; RV32I-NEXT:  .LBB107_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB117_1
+; RV32I-NEXT:  .LBB117_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -9390,18 +10026,18 @@ define i16 @atomicrmw_umin_i16_release(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a4, a3, a0
 ; RV32IA-NEXT:    and a1, a1, a3
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB107_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB117_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a3, (a2)
 ; RV32IA-NEXT:    and a6, a3, a4
 ; RV32IA-NEXT:    mv a5, a3
-; RV32IA-NEXT:    bgeu a1, a6, .LBB107_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB107_1 Depth=1
+; RV32IA-NEXT:    bgeu a1, a6, .LBB117_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB117_1 Depth=1
 ; RV32IA-NEXT:    xor a5, a3, a1
 ; RV32IA-NEXT:    and a5, a5, a4
 ; RV32IA-NEXT:    xor a5, a3, a5
-; RV32IA-NEXT:  .LBB107_3: # in Loop: Header=BB107_1 Depth=1
+; RV32IA-NEXT:  .LBB117_3: # in Loop: Header=BB117_1 Depth=1
 ; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB107_1
+; RV32IA-NEXT:    bnez a5, .LBB117_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -9420,9 +10056,9 @@ define i16 @atomicrmw_umin_i16_release(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    lui s2, 16
 ; RV64I-NEXT:    addiw s2, s2, -1
 ; RV64I-NEXT:    and s3, s0, s2
-; RV64I-NEXT:    j .LBB107_2
-; RV64I-NEXT:  .LBB107_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB107_2 Depth=1
+; RV64I-NEXT:    j .LBB117_2
+; RV64I-NEXT:  .LBB117_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB117_2 Depth=1
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    li a3, 3
@@ -9430,17 +10066,17 @@ define i16 @atomicrmw_umin_i16_release(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
-; RV64I-NEXT:    bnez a0, .LBB107_4
-; RV64I-NEXT:  .LBB107_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB117_4
+; RV64I-NEXT:  .LBB117_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    and a0, a1, s2
 ; RV64I-NEXT:    mv a2, a1
-; RV64I-NEXT:    bgeu s3, a0, .LBB107_1
+; RV64I-NEXT:    bgeu s3, a0, .LBB117_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB107_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB117_2 Depth=1
 ; RV64I-NEXT:    mv a2, s0
-; RV64I-NEXT:    j .LBB107_1
-; RV64I-NEXT:  .LBB107_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB117_1
+; RV64I-NEXT:  .LBB117_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -9459,18 +10095,18 @@ define i16 @atomicrmw_umin_i16_release(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a4, a3, a0
 ; RV64IA-NEXT:    and a1, a1, a3
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB107_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB117_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a3, (a2)
 ; RV64IA-NEXT:    and a6, a3, a4
 ; RV64IA-NEXT:    mv a5, a3
-; RV64IA-NEXT:    bgeu a1, a6, .LBB107_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB107_1 Depth=1
+; RV64IA-NEXT:    bgeu a1, a6, .LBB117_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB117_1 Depth=1
 ; RV64IA-NEXT:    xor a5, a3, a1
 ; RV64IA-NEXT:    and a5, a5, a4
 ; RV64IA-NEXT:    xor a5, a3, a5
-; RV64IA-NEXT:  .LBB107_3: # in Loop: Header=BB107_1 Depth=1
+; RV64IA-NEXT:  .LBB117_3: # in Loop: Header=BB117_1 Depth=1
 ; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB107_1
+; RV64IA-NEXT:    bnez a5, .LBB117_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -9493,9 +10129,9 @@ define i16 @atomicrmw_umin_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    lui s2, 16
 ; RV32I-NEXT:    addi s2, s2, -1
 ; RV32I-NEXT:    and s3, s0, s2
-; RV32I-NEXT:    j .LBB108_2
-; RV32I-NEXT:  .LBB108_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB108_2 Depth=1
+; RV32I-NEXT:    j .LBB118_2
+; RV32I-NEXT:  .LBB118_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB118_2 Depth=1
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    li a3, 4
@@ -9503,17 +10139,17 @@ define i16 @atomicrmw_umin_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
-; RV32I-NEXT:    bnez a0, .LBB108_4
-; RV32I-NEXT:  .LBB108_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB118_4
+; RV32I-NEXT:  .LBB118_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    and a0, a1, s2
 ; RV32I-NEXT:    mv a2, a1
-; RV32I-NEXT:    bgeu s3, a0, .LBB108_1
+; RV32I-NEXT:    bgeu s3, a0, .LBB118_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB108_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB118_2 Depth=1
 ; RV32I-NEXT:    mv a2, s0
-; RV32I-NEXT:    j .LBB108_1
-; RV32I-NEXT:  .LBB108_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB118_1
+; RV32I-NEXT:  .LBB118_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -9532,18 +10168,18 @@ define i16 @atomicrmw_umin_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a4, a3, a0
 ; RV32IA-NEXT:    and a1, a1, a3
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB108_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB118_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a3, (a2)
 ; RV32IA-NEXT:    and a6, a3, a4
 ; RV32IA-NEXT:    mv a5, a3
-; RV32IA-NEXT:    bgeu a1, a6, .LBB108_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB108_1 Depth=1
+; RV32IA-NEXT:    bgeu a1, a6, .LBB118_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB118_1 Depth=1
 ; RV32IA-NEXT:    xor a5, a3, a1
 ; RV32IA-NEXT:    and a5, a5, a4
 ; RV32IA-NEXT:    xor a5, a3, a5
-; RV32IA-NEXT:  .LBB108_3: # in Loop: Header=BB108_1 Depth=1
+; RV32IA-NEXT:  .LBB118_3: # in Loop: Header=BB118_1 Depth=1
 ; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB108_1
+; RV32IA-NEXT:    bnez a5, .LBB118_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -9562,9 +10198,9 @@ define i16 @atomicrmw_umin_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    lui s2, 16
 ; RV64I-NEXT:    addiw s2, s2, -1
 ; RV64I-NEXT:    and s3, s0, s2
-; RV64I-NEXT:    j .LBB108_2
-; RV64I-NEXT:  .LBB108_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB108_2 Depth=1
+; RV64I-NEXT:    j .LBB118_2
+; RV64I-NEXT:  .LBB118_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB118_2 Depth=1
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    li a3, 4
@@ -9572,17 +10208,17 @@ define i16 @atomicrmw_umin_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
-; RV64I-NEXT:    bnez a0, .LBB108_4
-; RV64I-NEXT:  .LBB108_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB118_4
+; RV64I-NEXT:  .LBB118_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    and a0, a1, s2
 ; RV64I-NEXT:    mv a2, a1
-; RV64I-NEXT:    bgeu s3, a0, .LBB108_1
+; RV64I-NEXT:    bgeu s3, a0, .LBB118_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB108_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB118_2 Depth=1
 ; RV64I-NEXT:    mv a2, s0
-; RV64I-NEXT:    j .LBB108_1
-; RV64I-NEXT:  .LBB108_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB118_1
+; RV64I-NEXT:  .LBB118_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -9601,18 +10237,18 @@ define i16 @atomicrmw_umin_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a4, a3, a0
 ; RV64IA-NEXT:    and a1, a1, a3
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB108_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB118_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a3, (a2)
 ; RV64IA-NEXT:    and a6, a3, a4
 ; RV64IA-NEXT:    mv a5, a3
-; RV64IA-NEXT:    bgeu a1, a6, .LBB108_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB108_1 Depth=1
+; RV64IA-NEXT:    bgeu a1, a6, .LBB118_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB118_1 Depth=1
 ; RV64IA-NEXT:    xor a5, a3, a1
 ; RV64IA-NEXT:    and a5, a5, a4
 ; RV64IA-NEXT:    xor a5, a3, a5
-; RV64IA-NEXT:  .LBB108_3: # in Loop: Header=BB108_1 Depth=1
+; RV64IA-NEXT:  .LBB118_3: # in Loop: Header=BB118_1 Depth=1
 ; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB108_1
+; RV64IA-NEXT:    bnez a5, .LBB118_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -9635,9 +10271,9 @@ define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    lui s2, 16
 ; RV32I-NEXT:    addi s2, s2, -1
 ; RV32I-NEXT:    and s3, s0, s2
-; RV32I-NEXT:    j .LBB109_2
-; RV32I-NEXT:  .LBB109_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB109_2 Depth=1
+; RV32I-NEXT:    j .LBB119_2
+; RV32I-NEXT:  .LBB119_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB119_2 Depth=1
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    li a3, 5
@@ -9645,17 +10281,17 @@ define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
-; RV32I-NEXT:    bnez a0, .LBB109_4
-; RV32I-NEXT:  .LBB109_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB119_4
+; RV32I-NEXT:  .LBB119_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    and a0, a1, s2
 ; RV32I-NEXT:    mv a2, a1
-; RV32I-NEXT:    bgeu s3, a0, .LBB109_1
+; RV32I-NEXT:    bgeu s3, a0, .LBB119_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB109_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB119_2 Depth=1
 ; RV32I-NEXT:    mv a2, s0
-; RV32I-NEXT:    j .LBB109_1
-; RV32I-NEXT:  .LBB109_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB119_1
+; RV32I-NEXT:  .LBB119_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -9674,18 +10310,18 @@ define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV32IA-NEXT:    sll a4, a3, a0
 ; RV32IA-NEXT:    and a1, a1, a3
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:  .LBB109_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB119_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aqrl a3, (a2)
 ; RV32IA-NEXT:    and a6, a3, a4
 ; RV32IA-NEXT:    mv a5, a3
-; RV32IA-NEXT:    bgeu a1, a6, .LBB109_3
-; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB109_1 Depth=1
+; RV32IA-NEXT:    bgeu a1, a6, .LBB119_3
+; RV32IA-NEXT:  # %bb.2: # in Loop: Header=BB119_1 Depth=1
 ; RV32IA-NEXT:    xor a5, a3, a1
 ; RV32IA-NEXT:    and a5, a5, a4
 ; RV32IA-NEXT:    xor a5, a3, a5
-; RV32IA-NEXT:  .LBB109_3: # in Loop: Header=BB109_1 Depth=1
+; RV32IA-NEXT:  .LBB119_3: # in Loop: Header=BB119_1 Depth=1
 ; RV32IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV32IA-NEXT:    bnez a5, .LBB109_1
+; RV32IA-NEXT:    bnez a5, .LBB119_1
 ; RV32IA-NEXT:  # %bb.4:
 ; RV32IA-NEXT:    srl a0, a3, a0
 ; RV32IA-NEXT:    ret
@@ -9704,9 +10340,9 @@ define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    lui s2, 16
 ; RV64I-NEXT:    addiw s2, s2, -1
 ; RV64I-NEXT:    and s3, s0, s2
-; RV64I-NEXT:    j .LBB109_2
-; RV64I-NEXT:  .LBB109_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB109_2 Depth=1
+; RV64I-NEXT:    j .LBB119_2
+; RV64I-NEXT:  .LBB119_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB119_2 Depth=1
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    li a3, 5
@@ -9714,17 +10350,17 @@ define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
-; RV64I-NEXT:    bnez a0, .LBB109_4
-; RV64I-NEXT:  .LBB109_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB119_4
+; RV64I-NEXT:  .LBB119_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    and a0, a1, s2
 ; RV64I-NEXT:    mv a2, a1
-; RV64I-NEXT:    bgeu s3, a0, .LBB109_1
+; RV64I-NEXT:    bgeu s3, a0, .LBB119_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB109_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB119_2 Depth=1
 ; RV64I-NEXT:    mv a2, s0
-; RV64I-NEXT:    j .LBB109_1
-; RV64I-NEXT:  .LBB109_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB119_1
+; RV64I-NEXT:  .LBB119_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -9743,18 +10379,18 @@ define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; RV64IA-NEXT:    sllw a4, a3, a0
 ; RV64IA-NEXT:    and a1, a1, a3
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:  .LBB109_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB119_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aqrl a3, (a2)
 ; RV64IA-NEXT:    and a6, a3, a4
 ; RV64IA-NEXT:    mv a5, a3
-; RV64IA-NEXT:    bgeu a1, a6, .LBB109_3
-; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB109_1 Depth=1
+; RV64IA-NEXT:    bgeu a1, a6, .LBB119_3
+; RV64IA-NEXT:  # %bb.2: # in Loop: Header=BB119_1 Depth=1
 ; RV64IA-NEXT:    xor a5, a3, a1
 ; RV64IA-NEXT:    and a5, a5, a4
 ; RV64IA-NEXT:    xor a5, a3, a5
-; RV64IA-NEXT:  .LBB109_3: # in Loop: Header=BB109_1 Depth=1
+; RV64IA-NEXT:  .LBB119_3: # in Loop: Header=BB119_1 Depth=1
 ; RV64IA-NEXT:    sc.w.rl a5, a5, (a2)
-; RV64IA-NEXT:    bnez a5, .LBB109_1
+; RV64IA-NEXT:    bnez a5, .LBB119_1
 ; RV64IA-NEXT:  # %bb.4:
 ; RV64IA-NEXT:    srlw a0, a3, a0
 ; RV64IA-NEXT:    ret
@@ -10465,12 +11101,12 @@ define i32 @atomicrmw_nand_i32_monotonic(ptr %a, i32 %b) nounwind {
 ;
 ; RV32IA-LABEL: atomicrmw_nand_i32_monotonic:
 ; RV32IA:       # %bb.0:
-; RV32IA-NEXT:  .LBB130_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB140_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a2, (a0)
 ; RV32IA-NEXT:    and a3, a2, a1
 ; RV32IA-NEXT:    not a3, a3
 ; RV32IA-NEXT:    sc.w a3, a3, (a0)
-; RV32IA-NEXT:    bnez a3, .LBB130_1
+; RV32IA-NEXT:    bnez a3, .LBB140_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    mv a0, a2
 ; RV32IA-NEXT:    ret
@@ -10487,12 +11123,12 @@ define i32 @atomicrmw_nand_i32_monotonic(ptr %a, i32 %b) nounwind {
 ;
 ; RV64IA-LABEL: atomicrmw_nand_i32_monotonic:
 ; RV64IA:       # %bb.0:
-; RV64IA-NEXT:  .LBB130_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB140_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a2, (a0)
 ; RV64IA-NEXT:    and a3, a2, a1
 ; RV64IA-NEXT:    not a3, a3
 ; RV64IA-NEXT:    sc.w a3, a3, (a0)
-; RV64IA-NEXT:    bnez a3, .LBB130_1
+; RV64IA-NEXT:    bnez a3, .LBB140_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    mv a0, a2
 ; RV64IA-NEXT:    ret
@@ -10513,12 +11149,12 @@ define i32 @atomicrmw_nand_i32_acquire(ptr %a, i32 %b) nounwind {
 ;
 ; RV32IA-LABEL: atomicrmw_nand_i32_acquire:
 ; RV32IA:       # %bb.0:
-; RV32IA-NEXT:  .LBB131_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB141_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a2, (a0)
 ; RV32IA-NEXT:    and a3, a2, a1
 ; RV32IA-NEXT:    not a3, a3
 ; RV32IA-NEXT:    sc.w a3, a3, (a0)
-; RV32IA-NEXT:    bnez a3, .LBB131_1
+; RV32IA-NEXT:    bnez a3, .LBB141_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    mv a0, a2
 ; RV32IA-NEXT:    ret
@@ -10535,12 +11171,12 @@ define i32 @atomicrmw_nand_i32_acquire(ptr %a, i32 %b) nounwind {
 ;
 ; RV64IA-LABEL: atomicrmw_nand_i32_acquire:
 ; RV64IA:       # %bb.0:
-; RV64IA-NEXT:  .LBB131_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB141_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a2, (a0)
 ; RV64IA-NEXT:    and a3, a2, a1
 ; RV64IA-NEXT:    not a3, a3
 ; RV64IA-NEXT:    sc.w a3, a3, (a0)
-; RV64IA-NEXT:    bnez a3, .LBB131_1
+; RV64IA-NEXT:    bnez a3, .LBB141_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    mv a0, a2
 ; RV64IA-NEXT:    ret
@@ -10561,12 +11197,12 @@ define i32 @atomicrmw_nand_i32_release(ptr %a, i32 %b) nounwind {
 ;
 ; RV32IA-LABEL: atomicrmw_nand_i32_release:
 ; RV32IA:       # %bb.0:
-; RV32IA-NEXT:  .LBB132_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB142_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a2, (a0)
 ; RV32IA-NEXT:    and a3, a2, a1
 ; RV32IA-NEXT:    not a3, a3
 ; RV32IA-NEXT:    sc.w.rl a3, a3, (a0)
-; RV32IA-NEXT:    bnez a3, .LBB132_1
+; RV32IA-NEXT:    bnez a3, .LBB142_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    mv a0, a2
 ; RV32IA-NEXT:    ret
@@ -10583,12 +11219,12 @@ define i32 @atomicrmw_nand_i32_release(ptr %a, i32 %b) nounwind {
 ;
 ; RV64IA-LABEL: atomicrmw_nand_i32_release:
 ; RV64IA:       # %bb.0:
-; RV64IA-NEXT:  .LBB132_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB142_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a2, (a0)
 ; RV64IA-NEXT:    and a3, a2, a1
 ; RV64IA-NEXT:    not a3, a3
 ; RV64IA-NEXT:    sc.w.rl a3, a3, (a0)
-; RV64IA-NEXT:    bnez a3, .LBB132_1
+; RV64IA-NEXT:    bnez a3, .LBB142_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    mv a0, a2
 ; RV64IA-NEXT:    ret
@@ -10609,12 +11245,12 @@ define i32 @atomicrmw_nand_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ;
 ; RV32IA-LABEL: atomicrmw_nand_i32_acq_rel:
 ; RV32IA:       # %bb.0:
-; RV32IA-NEXT:  .LBB133_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB143_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a2, (a0)
 ; RV32IA-NEXT:    and a3, a2, a1
 ; RV32IA-NEXT:    not a3, a3
 ; RV32IA-NEXT:    sc.w.rl a3, a3, (a0)
-; RV32IA-NEXT:    bnez a3, .LBB133_1
+; RV32IA-NEXT:    bnez a3, .LBB143_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    mv a0, a2
 ; RV32IA-NEXT:    ret
@@ -10631,12 +11267,12 @@ define i32 @atomicrmw_nand_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ;
 ; RV64IA-LABEL: atomicrmw_nand_i32_acq_rel:
 ; RV64IA:       # %bb.0:
-; RV64IA-NEXT:  .LBB133_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB143_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a2, (a0)
 ; RV64IA-NEXT:    and a3, a2, a1
 ; RV64IA-NEXT:    not a3, a3
 ; RV64IA-NEXT:    sc.w.rl a3, a3, (a0)
-; RV64IA-NEXT:    bnez a3, .LBB133_1
+; RV64IA-NEXT:    bnez a3, .LBB143_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    mv a0, a2
 ; RV64IA-NEXT:    ret
@@ -10657,12 +11293,12 @@ define i32 @atomicrmw_nand_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ;
 ; RV32IA-LABEL: atomicrmw_nand_i32_seq_cst:
 ; RV32IA:       # %bb.0:
-; RV32IA-NEXT:  .LBB134_1: # =>This Inner Loop Header: Depth=1
+; RV32IA-NEXT:  .LBB144_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aqrl a2, (a0)
 ; RV32IA-NEXT:    and a3, a2, a1
 ; RV32IA-NEXT:    not a3, a3
 ; RV32IA-NEXT:    sc.w.rl a3, a3, (a0)
-; RV32IA-NEXT:    bnez a3, .LBB134_1
+; RV32IA-NEXT:    bnez a3, .LBB144_1
 ; RV32IA-NEXT:  # %bb.2:
 ; RV32IA-NEXT:    mv a0, a2
 ; RV32IA-NEXT:    ret
@@ -10679,12 +11315,12 @@ define i32 @atomicrmw_nand_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ;
 ; RV64IA-LABEL: atomicrmw_nand_i32_seq_cst:
 ; RV64IA:       # %bb.0:
-; RV64IA-NEXT:  .LBB134_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB144_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aqrl a2, (a0)
 ; RV64IA-NEXT:    and a3, a2, a1
 ; RV64IA-NEXT:    not a3, a3
 ; RV64IA-NEXT:    sc.w.rl a3, a3, (a0)
-; RV64IA-NEXT:    bnez a3, .LBB134_1
+; RV64IA-NEXT:    bnez a3, .LBB144_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    mv a0, a2
 ; RV64IA-NEXT:    ret
@@ -11042,9 +11678,9 @@ define i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    j .LBB145_2
-; RV32I-NEXT:  .LBB145_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB145_2 Depth=1
+; RV32I-NEXT:    j .LBB155_2
+; RV32I-NEXT:  .LBB155_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB155_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    mv a0, s0
@@ -11052,16 +11688,16 @@ define i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
-; RV32I-NEXT:    bnez a0, .LBB145_4
-; RV32I-NEXT:  .LBB145_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB155_4
+; RV32I-NEXT:  .LBB155_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    blt s1, a3, .LBB145_1
+; RV32I-NEXT:    blt s1, a3, .LBB155_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB145_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB155_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB145_1
-; RV32I-NEXT:  .LBB145_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB155_1
+; RV32I-NEXT:  .LBB155_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -11085,9 +11721,9 @@ define i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    lw a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    sext.w s2, a1
-; RV64I-NEXT:    j .LBB145_2
-; RV64I-NEXT:  .LBB145_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB145_2 Depth=1
+; RV64I-NEXT:    j .LBB155_2
+; RV64I-NEXT:  .LBB155_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB155_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    mv a0, s0
@@ -11095,16 +11731,16 @@ define i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
-; RV64I-NEXT:    bnez a0, .LBB145_4
-; RV64I-NEXT:  .LBB145_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB155_4
+; RV64I-NEXT:  .LBB155_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    blt s2, a3, .LBB145_1
+; RV64I-NEXT:    blt s2, a3, .LBB155_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB145_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB155_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB145_1
-; RV64I-NEXT:  .LBB145_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB155_1
+; RV64I-NEXT:  .LBB155_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -11131,9 +11767,9 @@ define i32 @atomicrmw_max_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    j .LBB146_2
-; RV32I-NEXT:  .LBB146_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB146_2 Depth=1
+; RV32I-NEXT:    j .LBB156_2
+; RV32I-NEXT:  .LBB156_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB156_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    li a3, 2
@@ -11141,16 +11777,16 @@ define i32 @atomicrmw_max_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
-; RV32I-NEXT:    bnez a0, .LBB146_4
-; RV32I-NEXT:  .LBB146_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB156_4
+; RV32I-NEXT:  .LBB156_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    blt s1, a3, .LBB146_1
+; RV32I-NEXT:    blt s1, a3, .LBB156_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB146_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB156_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB146_1
-; RV32I-NEXT:  .LBB146_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB156_1
+; RV32I-NEXT:  .LBB156_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -11174,9 +11810,9 @@ define i32 @atomicrmw_max_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    lw a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    sext.w s2, a1
-; RV64I-NEXT:    j .LBB146_2
-; RV64I-NEXT:  .LBB146_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB146_2 Depth=1
+; RV64I-NEXT:    j .LBB156_2
+; RV64I-NEXT:  .LBB156_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB156_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    li a3, 2
@@ -11184,16 +11820,16 @@ define i32 @atomicrmw_max_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
-; RV64I-NEXT:    bnez a0, .LBB146_4
-; RV64I-NEXT:  .LBB146_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB156_4
+; RV64I-NEXT:  .LBB156_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    blt s2, a3, .LBB146_1
+; RV64I-NEXT:    blt s2, a3, .LBB156_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB146_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB156_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB146_1
-; RV64I-NEXT:  .LBB146_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB156_1
+; RV64I-NEXT:  .LBB156_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -11220,9 +11856,9 @@ define i32 @atomicrmw_max_i32_release(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    j .LBB147_2
-; RV32I-NEXT:  .LBB147_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB147_2 Depth=1
+; RV32I-NEXT:    j .LBB157_2
+; RV32I-NEXT:  .LBB157_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB157_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    li a3, 3
@@ -11230,16 +11866,16 @@ define i32 @atomicrmw_max_i32_release(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
-; RV32I-NEXT:    bnez a0, .LBB147_4
-; RV32I-NEXT:  .LBB147_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB157_4
+; RV32I-NEXT:  .LBB157_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    blt s1, a3, .LBB147_1
+; RV32I-NEXT:    blt s1, a3, .LBB157_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB147_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB157_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB147_1
-; RV32I-NEXT:  .LBB147_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB157_1
+; RV32I-NEXT:  .LBB157_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -11263,9 +11899,9 @@ define i32 @atomicrmw_max_i32_release(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    lw a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    sext.w s2, a1
-; RV64I-NEXT:    j .LBB147_2
-; RV64I-NEXT:  .LBB147_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB147_2 Depth=1
+; RV64I-NEXT:    j .LBB157_2
+; RV64I-NEXT:  .LBB157_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB157_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    li a3, 3
@@ -11273,16 +11909,16 @@ define i32 @atomicrmw_max_i32_release(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
-; RV64I-NEXT:    bnez a0, .LBB147_4
-; RV64I-NEXT:  .LBB147_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB157_4
+; RV64I-NEXT:  .LBB157_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    blt s2, a3, .LBB147_1
+; RV64I-NEXT:    blt s2, a3, .LBB157_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB147_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB157_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB147_1
-; RV64I-NEXT:  .LBB147_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB157_1
+; RV64I-NEXT:  .LBB157_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -11309,9 +11945,9 @@ define i32 @atomicrmw_max_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    j .LBB148_2
-; RV32I-NEXT:  .LBB148_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB148_2 Depth=1
+; RV32I-NEXT:    j .LBB158_2
+; RV32I-NEXT:  .LBB158_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB158_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    li a3, 4
@@ -11319,16 +11955,16 @@ define i32 @atomicrmw_max_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
-; RV32I-NEXT:    bnez a0, .LBB148_4
-; RV32I-NEXT:  .LBB148_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB158_4
+; RV32I-NEXT:  .LBB158_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    blt s1, a3, .LBB148_1
+; RV32I-NEXT:    blt s1, a3, .LBB158_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB148_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB158_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB148_1
-; RV32I-NEXT:  .LBB148_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB158_1
+; RV32I-NEXT:  .LBB158_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -11352,9 +11988,9 @@ define i32 @atomicrmw_max_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    lw a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    sext.w s2, a1
-; RV64I-NEXT:    j .LBB148_2
-; RV64I-NEXT:  .LBB148_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB148_2 Depth=1
+; RV64I-NEXT:    j .LBB158_2
+; RV64I-NEXT:  .LBB158_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB158_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    li a3, 4
@@ -11362,16 +11998,16 @@ define i32 @atomicrmw_max_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
-; RV64I-NEXT:    bnez a0, .LBB148_4
-; RV64I-NEXT:  .LBB148_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB158_4
+; RV64I-NEXT:  .LBB158_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    blt s2, a3, .LBB148_1
+; RV64I-NEXT:    blt s2, a3, .LBB158_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB148_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB158_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB148_1
-; RV64I-NEXT:  .LBB148_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB158_1
+; RV64I-NEXT:  .LBB158_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -11398,9 +12034,9 @@ define i32 @atomicrmw_max_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    j .LBB149_2
-; RV32I-NEXT:  .LBB149_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB149_2 Depth=1
+; RV32I-NEXT:    j .LBB159_2
+; RV32I-NEXT:  .LBB159_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB159_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    li a3, 5
@@ -11408,16 +12044,16 @@ define i32 @atomicrmw_max_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
-; RV32I-NEXT:    bnez a0, .LBB149_4
-; RV32I-NEXT:  .LBB149_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB159_4
+; RV32I-NEXT:  .LBB159_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    blt s1, a3, .LBB149_1
+; RV32I-NEXT:    blt s1, a3, .LBB159_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB149_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB159_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB149_1
-; RV32I-NEXT:  .LBB149_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB159_1
+; RV32I-NEXT:  .LBB159_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -11441,9 +12077,9 @@ define i32 @atomicrmw_max_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    lw a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    sext.w s2, a1
-; RV64I-NEXT:    j .LBB149_2
-; RV64I-NEXT:  .LBB149_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB149_2 Depth=1
+; RV64I-NEXT:    j .LBB159_2
+; RV64I-NEXT:  .LBB159_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB159_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    li a3, 5
@@ -11451,16 +12087,16 @@ define i32 @atomicrmw_max_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
-; RV64I-NEXT:    bnez a0, .LBB149_4
-; RV64I-NEXT:  .LBB149_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB159_4
+; RV64I-NEXT:  .LBB159_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    blt s2, a3, .LBB149_1
+; RV64I-NEXT:    blt s2, a3, .LBB159_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB149_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB159_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB149_1
-; RV64I-NEXT:  .LBB149_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB159_1
+; RV64I-NEXT:  .LBB159_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -11487,9 +12123,9 @@ define i32 @atomicrmw_min_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    j .LBB150_2
-; RV32I-NEXT:  .LBB150_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB150_2 Depth=1
+; RV32I-NEXT:    j .LBB160_2
+; RV32I-NEXT:  .LBB160_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB160_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    mv a0, s0
@@ -11497,16 +12133,16 @@ define i32 @atomicrmw_min_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
-; RV32I-NEXT:    bnez a0, .LBB150_4
-; RV32I-NEXT:  .LBB150_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB160_4
+; RV32I-NEXT:  .LBB160_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bge s1, a3, .LBB150_1
+; RV32I-NEXT:    bge s1, a3, .LBB160_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB150_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB160_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB150_1
-; RV32I-NEXT:  .LBB150_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB160_1
+; RV32I-NEXT:  .LBB160_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -11530,9 +12166,9 @@ define i32 @atomicrmw_min_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    lw a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    sext.w s2, a1
-; RV64I-NEXT:    j .LBB150_2
-; RV64I-NEXT:  .LBB150_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB150_2 Depth=1
+; RV64I-NEXT:    j .LBB160_2
+; RV64I-NEXT:  .LBB160_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB160_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    mv a0, s0
@@ -11540,16 +12176,16 @@ define i32 @atomicrmw_min_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
-; RV64I-NEXT:    bnez a0, .LBB150_4
-; RV64I-NEXT:  .LBB150_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB160_4
+; RV64I-NEXT:  .LBB160_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bge s2, a3, .LBB150_1
+; RV64I-NEXT:    bge s2, a3, .LBB160_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB150_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB160_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB150_1
-; RV64I-NEXT:  .LBB150_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB160_1
+; RV64I-NEXT:  .LBB160_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -11576,9 +12212,9 @@ define i32 @atomicrmw_min_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    j .LBB151_2
-; RV32I-NEXT:  .LBB151_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB151_2 Depth=1
+; RV32I-NEXT:    j .LBB161_2
+; RV32I-NEXT:  .LBB161_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB161_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    li a3, 2
@@ -11586,16 +12222,16 @@ define i32 @atomicrmw_min_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
-; RV32I-NEXT:    bnez a0, .LBB151_4
-; RV32I-NEXT:  .LBB151_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB161_4
+; RV32I-NEXT:  .LBB161_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bge s1, a3, .LBB151_1
+; RV32I-NEXT:    bge s1, a3, .LBB161_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB151_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB161_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB151_1
-; RV32I-NEXT:  .LBB151_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB161_1
+; RV32I-NEXT:  .LBB161_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -11619,9 +12255,9 @@ define i32 @atomicrmw_min_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    lw a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    sext.w s2, a1
-; RV64I-NEXT:    j .LBB151_2
-; RV64I-NEXT:  .LBB151_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB151_2 Depth=1
+; RV64I-NEXT:    j .LBB161_2
+; RV64I-NEXT:  .LBB161_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB161_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    li a3, 2
@@ -11629,16 +12265,16 @@ define i32 @atomicrmw_min_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
-; RV64I-NEXT:    bnez a0, .LBB151_4
-; RV64I-NEXT:  .LBB151_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB161_4
+; RV64I-NEXT:  .LBB161_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bge s2, a3, .LBB151_1
+; RV64I-NEXT:    bge s2, a3, .LBB161_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB151_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB161_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB151_1
-; RV64I-NEXT:  .LBB151_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB161_1
+; RV64I-NEXT:  .LBB161_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -11665,9 +12301,9 @@ define i32 @atomicrmw_min_i32_release(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    j .LBB152_2
-; RV32I-NEXT:  .LBB152_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB152_2 Depth=1
+; RV32I-NEXT:    j .LBB162_2
+; RV32I-NEXT:  .LBB162_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB162_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    li a3, 3
@@ -11675,16 +12311,16 @@ define i32 @atomicrmw_min_i32_release(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
-; RV32I-NEXT:    bnez a0, .LBB152_4
-; RV32I-NEXT:  .LBB152_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB162_4
+; RV32I-NEXT:  .LBB162_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bge s1, a3, .LBB152_1
+; RV32I-NEXT:    bge s1, a3, .LBB162_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB152_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB162_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB152_1
-; RV32I-NEXT:  .LBB152_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB162_1
+; RV32I-NEXT:  .LBB162_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -11708,9 +12344,9 @@ define i32 @atomicrmw_min_i32_release(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    lw a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    sext.w s2, a1
-; RV64I-NEXT:    j .LBB152_2
-; RV64I-NEXT:  .LBB152_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB152_2 Depth=1
+; RV64I-NEXT:    j .LBB162_2
+; RV64I-NEXT:  .LBB162_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB162_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    li a3, 3
@@ -11718,16 +12354,16 @@ define i32 @atomicrmw_min_i32_release(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
-; RV64I-NEXT:    bnez a0, .LBB152_4
-; RV64I-NEXT:  .LBB152_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB162_4
+; RV64I-NEXT:  .LBB162_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bge s2, a3, .LBB152_1
+; RV64I-NEXT:    bge s2, a3, .LBB162_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB152_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB162_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB152_1
-; RV64I-NEXT:  .LBB152_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB162_1
+; RV64I-NEXT:  .LBB162_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -11754,9 +12390,9 @@ define i32 @atomicrmw_min_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    j .LBB153_2
-; RV32I-NEXT:  .LBB153_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB153_2 Depth=1
+; RV32I-NEXT:    j .LBB163_2
+; RV32I-NEXT:  .LBB163_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB163_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    li a3, 4
@@ -11764,16 +12400,16 @@ define i32 @atomicrmw_min_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
-; RV32I-NEXT:    bnez a0, .LBB153_4
-; RV32I-NEXT:  .LBB153_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB163_4
+; RV32I-NEXT:  .LBB163_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bge s1, a3, .LBB153_1
+; RV32I-NEXT:    bge s1, a3, .LBB163_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB153_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB163_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB153_1
-; RV32I-NEXT:  .LBB153_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB163_1
+; RV32I-NEXT:  .LBB163_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -11797,9 +12433,9 @@ define i32 @atomicrmw_min_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    lw a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    sext.w s2, a1
-; RV64I-NEXT:    j .LBB153_2
-; RV64I-NEXT:  .LBB153_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB153_2 Depth=1
+; RV64I-NEXT:    j .LBB163_2
+; RV64I-NEXT:  .LBB163_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB163_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    li a3, 4
@@ -11807,16 +12443,16 @@ define i32 @atomicrmw_min_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
-; RV64I-NEXT:    bnez a0, .LBB153_4
-; RV64I-NEXT:  .LBB153_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB163_4
+; RV64I-NEXT:  .LBB163_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bge s2, a3, .LBB153_1
+; RV64I-NEXT:    bge s2, a3, .LBB163_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB153_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB163_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB153_1
-; RV64I-NEXT:  .LBB153_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB163_1
+; RV64I-NEXT:  .LBB163_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -11843,9 +12479,9 @@ define i32 @atomicrmw_min_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    j .LBB154_2
-; RV32I-NEXT:  .LBB154_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB154_2 Depth=1
+; RV32I-NEXT:    j .LBB164_2
+; RV32I-NEXT:  .LBB164_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB164_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    li a3, 5
@@ -11853,16 +12489,16 @@ define i32 @atomicrmw_min_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
-; RV32I-NEXT:    bnez a0, .LBB154_4
-; RV32I-NEXT:  .LBB154_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB164_4
+; RV32I-NEXT:  .LBB164_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bge s1, a3, .LBB154_1
+; RV32I-NEXT:    bge s1, a3, .LBB164_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB154_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB164_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB154_1
-; RV32I-NEXT:  .LBB154_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB164_1
+; RV32I-NEXT:  .LBB164_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -11886,9 +12522,9 @@ define i32 @atomicrmw_min_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    lw a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    sext.w s2, a1
-; RV64I-NEXT:    j .LBB154_2
-; RV64I-NEXT:  .LBB154_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB154_2 Depth=1
+; RV64I-NEXT:    j .LBB164_2
+; RV64I-NEXT:  .LBB164_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB164_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    li a3, 5
@@ -11896,16 +12532,16 @@ define i32 @atomicrmw_min_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
-; RV64I-NEXT:    bnez a0, .LBB154_4
-; RV64I-NEXT:  .LBB154_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB164_4
+; RV64I-NEXT:  .LBB164_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bge s2, a3, .LBB154_1
+; RV64I-NEXT:    bge s2, a3, .LBB164_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB154_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB164_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB154_1
-; RV64I-NEXT:  .LBB154_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB164_1
+; RV64I-NEXT:  .LBB164_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -11932,9 +12568,9 @@ define i32 @atomicrmw_umax_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    j .LBB155_2
-; RV32I-NEXT:  .LBB155_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB155_2 Depth=1
+; RV32I-NEXT:    j .LBB165_2
+; RV32I-NEXT:  .LBB165_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB165_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    mv a0, s0
@@ -11942,16 +12578,16 @@ define i32 @atomicrmw_umax_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
-; RV32I-NEXT:    bnez a0, .LBB155_4
-; RV32I-NEXT:  .LBB155_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB165_4
+; RV32I-NEXT:  .LBB165_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bltu s1, a3, .LBB155_1
+; RV32I-NEXT:    bltu s1, a3, .LBB165_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB155_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB165_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB155_1
-; RV32I-NEXT:  .LBB155_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB165_1
+; RV32I-NEXT:  .LBB165_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -11975,9 +12611,9 @@ define i32 @atomicrmw_umax_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    lw a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    sext.w s2, a1
-; RV64I-NEXT:    j .LBB155_2
-; RV64I-NEXT:  .LBB155_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB155_2 Depth=1
+; RV64I-NEXT:    j .LBB165_2
+; RV64I-NEXT:  .LBB165_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB165_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    mv a0, s0
@@ -11985,16 +12621,16 @@ define i32 @atomicrmw_umax_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
-; RV64I-NEXT:    bnez a0, .LBB155_4
-; RV64I-NEXT:  .LBB155_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB165_4
+; RV64I-NEXT:  .LBB165_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bltu s2, a3, .LBB155_1
+; RV64I-NEXT:    bltu s2, a3, .LBB165_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB155_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB165_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB155_1
-; RV64I-NEXT:  .LBB155_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB165_1
+; RV64I-NEXT:  .LBB165_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -12021,9 +12657,9 @@ define i32 @atomicrmw_umax_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    j .LBB156_2
-; RV32I-NEXT:  .LBB156_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB156_2 Depth=1
+; RV32I-NEXT:    j .LBB166_2
+; RV32I-NEXT:  .LBB166_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB166_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    li a3, 2
@@ -12031,16 +12667,16 @@ define i32 @atomicrmw_umax_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
-; RV32I-NEXT:    bnez a0, .LBB156_4
-; RV32I-NEXT:  .LBB156_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB166_4
+; RV32I-NEXT:  .LBB166_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bltu s1, a3, .LBB156_1
+; RV32I-NEXT:    bltu s1, a3, .LBB166_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB156_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB166_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB156_1
-; RV32I-NEXT:  .LBB156_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB166_1
+; RV32I-NEXT:  .LBB166_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -12064,9 +12700,9 @@ define i32 @atomicrmw_umax_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    lw a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    sext.w s2, a1
-; RV64I-NEXT:    j .LBB156_2
-; RV64I-NEXT:  .LBB156_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB156_2 Depth=1
+; RV64I-NEXT:    j .LBB166_2
+; RV64I-NEXT:  .LBB166_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB166_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    li a3, 2
@@ -12074,16 +12710,16 @@ define i32 @atomicrmw_umax_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
-; RV64I-NEXT:    bnez a0, .LBB156_4
-; RV64I-NEXT:  .LBB156_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB166_4
+; RV64I-NEXT:  .LBB166_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bltu s2, a3, .LBB156_1
+; RV64I-NEXT:    bltu s2, a3, .LBB166_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB156_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB166_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB156_1
-; RV64I-NEXT:  .LBB156_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB166_1
+; RV64I-NEXT:  .LBB166_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -12110,9 +12746,9 @@ define i32 @atomicrmw_umax_i32_release(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    j .LBB157_2
-; RV32I-NEXT:  .LBB157_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB157_2 Depth=1
+; RV32I-NEXT:    j .LBB167_2
+; RV32I-NEXT:  .LBB167_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB167_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    li a3, 3
@@ -12120,16 +12756,16 @@ define i32 @atomicrmw_umax_i32_release(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
-; RV32I-NEXT:    bnez a0, .LBB157_4
-; RV32I-NEXT:  .LBB157_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB167_4
+; RV32I-NEXT:  .LBB167_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bltu s1, a3, .LBB157_1
+; RV32I-NEXT:    bltu s1, a3, .LBB167_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB157_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB167_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB157_1
-; RV32I-NEXT:  .LBB157_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB167_1
+; RV32I-NEXT:  .LBB167_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -12153,9 +12789,9 @@ define i32 @atomicrmw_umax_i32_release(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    lw a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    sext.w s2, a1
-; RV64I-NEXT:    j .LBB157_2
-; RV64I-NEXT:  .LBB157_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB157_2 Depth=1
+; RV64I-NEXT:    j .LBB167_2
+; RV64I-NEXT:  .LBB167_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB167_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    li a3, 3
@@ -12163,16 +12799,16 @@ define i32 @atomicrmw_umax_i32_release(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
-; RV64I-NEXT:    bnez a0, .LBB157_4
-; RV64I-NEXT:  .LBB157_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB167_4
+; RV64I-NEXT:  .LBB167_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bltu s2, a3, .LBB157_1
+; RV64I-NEXT:    bltu s2, a3, .LBB167_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB157_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB167_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB157_1
-; RV64I-NEXT:  .LBB157_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB167_1
+; RV64I-NEXT:  .LBB167_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -12199,9 +12835,9 @@ define i32 @atomicrmw_umax_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    j .LBB158_2
-; RV32I-NEXT:  .LBB158_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB158_2 Depth=1
+; RV32I-NEXT:    j .LBB168_2
+; RV32I-NEXT:  .LBB168_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB168_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    li a3, 4
@@ -12209,16 +12845,16 @@ define i32 @atomicrmw_umax_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
-; RV32I-NEXT:    bnez a0, .LBB158_4
-; RV32I-NEXT:  .LBB158_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB168_4
+; RV32I-NEXT:  .LBB168_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bltu s1, a3, .LBB158_1
+; RV32I-NEXT:    bltu s1, a3, .LBB168_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB158_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB168_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB158_1
-; RV32I-NEXT:  .LBB158_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB168_1
+; RV32I-NEXT:  .LBB168_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -12242,9 +12878,9 @@ define i32 @atomicrmw_umax_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    lw a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    sext.w s2, a1
-; RV64I-NEXT:    j .LBB158_2
-; RV64I-NEXT:  .LBB158_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB158_2 Depth=1
+; RV64I-NEXT:    j .LBB168_2
+; RV64I-NEXT:  .LBB168_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB168_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    li a3, 4
@@ -12252,16 +12888,16 @@ define i32 @atomicrmw_umax_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
-; RV64I-NEXT:    bnez a0, .LBB158_4
-; RV64I-NEXT:  .LBB158_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB168_4
+; RV64I-NEXT:  .LBB168_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bltu s2, a3, .LBB158_1
+; RV64I-NEXT:    bltu s2, a3, .LBB168_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB158_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB168_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB158_1
-; RV64I-NEXT:  .LBB158_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB168_1
+; RV64I-NEXT:  .LBB168_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -12288,9 +12924,9 @@ define i32 @atomicrmw_umax_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    j .LBB159_2
-; RV32I-NEXT:  .LBB159_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB159_2 Depth=1
+; RV32I-NEXT:    j .LBB169_2
+; RV32I-NEXT:  .LBB169_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB169_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    li a3, 5
@@ -12298,16 +12934,16 @@ define i32 @atomicrmw_umax_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
-; RV32I-NEXT:    bnez a0, .LBB159_4
-; RV32I-NEXT:  .LBB159_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB169_4
+; RV32I-NEXT:  .LBB169_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bltu s1, a3, .LBB159_1
+; RV32I-NEXT:    bltu s1, a3, .LBB169_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB159_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB169_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB159_1
-; RV32I-NEXT:  .LBB159_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB169_1
+; RV32I-NEXT:  .LBB169_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -12331,9 +12967,9 @@ define i32 @atomicrmw_umax_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    lw a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    sext.w s2, a1
-; RV64I-NEXT:    j .LBB159_2
-; RV64I-NEXT:  .LBB159_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB159_2 Depth=1
+; RV64I-NEXT:    j .LBB169_2
+; RV64I-NEXT:  .LBB169_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB169_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    li a3, 5
@@ -12341,16 +12977,16 @@ define i32 @atomicrmw_umax_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
-; RV64I-NEXT:    bnez a0, .LBB159_4
-; RV64I-NEXT:  .LBB159_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB169_4
+; RV64I-NEXT:  .LBB169_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bltu s2, a3, .LBB159_1
+; RV64I-NEXT:    bltu s2, a3, .LBB169_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB159_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB169_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB159_1
-; RV64I-NEXT:  .LBB159_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB169_1
+; RV64I-NEXT:  .LBB169_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -12377,9 +13013,9 @@ define i32 @atomicrmw_umin_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    j .LBB160_2
-; RV32I-NEXT:  .LBB160_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB160_2 Depth=1
+; RV32I-NEXT:    j .LBB170_2
+; RV32I-NEXT:  .LBB170_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB170_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    mv a0, s0
@@ -12387,16 +13023,16 @@ define i32 @atomicrmw_umin_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
-; RV32I-NEXT:    bnez a0, .LBB160_4
-; RV32I-NEXT:  .LBB160_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB170_4
+; RV32I-NEXT:  .LBB170_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bgeu s1, a3, .LBB160_1
+; RV32I-NEXT:    bgeu s1, a3, .LBB170_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB160_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB170_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB160_1
-; RV32I-NEXT:  .LBB160_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB170_1
+; RV32I-NEXT:  .LBB170_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -12420,9 +13056,9 @@ define i32 @atomicrmw_umin_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    lw a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    sext.w s2, a1
-; RV64I-NEXT:    j .LBB160_2
-; RV64I-NEXT:  .LBB160_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB160_2 Depth=1
+; RV64I-NEXT:    j .LBB170_2
+; RV64I-NEXT:  .LBB170_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB170_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    mv a0, s0
@@ -12430,16 +13066,16 @@ define i32 @atomicrmw_umin_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
-; RV64I-NEXT:    bnez a0, .LBB160_4
-; RV64I-NEXT:  .LBB160_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB170_4
+; RV64I-NEXT:  .LBB170_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bgeu s2, a3, .LBB160_1
+; RV64I-NEXT:    bgeu s2, a3, .LBB170_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB160_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB170_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB160_1
-; RV64I-NEXT:  .LBB160_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB170_1
+; RV64I-NEXT:  .LBB170_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -12466,9 +13102,9 @@ define i32 @atomicrmw_umin_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    j .LBB161_2
-; RV32I-NEXT:  .LBB161_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB161_2 Depth=1
+; RV32I-NEXT:    j .LBB171_2
+; RV32I-NEXT:  .LBB171_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB171_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    li a3, 2
@@ -12476,16 +13112,16 @@ define i32 @atomicrmw_umin_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
-; RV32I-NEXT:    bnez a0, .LBB161_4
-; RV32I-NEXT:  .LBB161_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB171_4
+; RV32I-NEXT:  .LBB171_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bgeu s1, a3, .LBB161_1
+; RV32I-NEXT:    bgeu s1, a3, .LBB171_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB161_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB171_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB161_1
-; RV32I-NEXT:  .LBB161_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB171_1
+; RV32I-NEXT:  .LBB171_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -12509,9 +13145,9 @@ define i32 @atomicrmw_umin_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    lw a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    sext.w s2, a1
-; RV64I-NEXT:    j .LBB161_2
-; RV64I-NEXT:  .LBB161_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB161_2 Depth=1
+; RV64I-NEXT:    j .LBB171_2
+; RV64I-NEXT:  .LBB171_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB171_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    li a3, 2
@@ -12519,16 +13155,16 @@ define i32 @atomicrmw_umin_i32_acquire(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
-; RV64I-NEXT:    bnez a0, .LBB161_4
-; RV64I-NEXT:  .LBB161_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB171_4
+; RV64I-NEXT:  .LBB171_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bgeu s2, a3, .LBB161_1
+; RV64I-NEXT:    bgeu s2, a3, .LBB171_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB161_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB171_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB161_1
-; RV64I-NEXT:  .LBB161_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB171_1
+; RV64I-NEXT:  .LBB171_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -12555,9 +13191,9 @@ define i32 @atomicrmw_umin_i32_release(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    j .LBB162_2
-; RV32I-NEXT:  .LBB162_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB162_2 Depth=1
+; RV32I-NEXT:    j .LBB172_2
+; RV32I-NEXT:  .LBB172_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB172_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    li a3, 3
@@ -12565,16 +13201,16 @@ define i32 @atomicrmw_umin_i32_release(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
-; RV32I-NEXT:    bnez a0, .LBB162_4
-; RV32I-NEXT:  .LBB162_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB172_4
+; RV32I-NEXT:  .LBB172_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bgeu s1, a3, .LBB162_1
+; RV32I-NEXT:    bgeu s1, a3, .LBB172_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB162_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB172_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB162_1
-; RV32I-NEXT:  .LBB162_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB172_1
+; RV32I-NEXT:  .LBB172_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -12598,9 +13234,9 @@ define i32 @atomicrmw_umin_i32_release(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    lw a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    sext.w s2, a1
-; RV64I-NEXT:    j .LBB162_2
-; RV64I-NEXT:  .LBB162_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB162_2 Depth=1
+; RV64I-NEXT:    j .LBB172_2
+; RV64I-NEXT:  .LBB172_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB172_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    li a3, 3
@@ -12608,16 +13244,16 @@ define i32 @atomicrmw_umin_i32_release(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
-; RV64I-NEXT:    bnez a0, .LBB162_4
-; RV64I-NEXT:  .LBB162_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB172_4
+; RV64I-NEXT:  .LBB172_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bgeu s2, a3, .LBB162_1
+; RV64I-NEXT:    bgeu s2, a3, .LBB172_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB162_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB172_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB162_1
-; RV64I-NEXT:  .LBB162_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB172_1
+; RV64I-NEXT:  .LBB172_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -12644,9 +13280,9 @@ define i32 @atomicrmw_umin_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    j .LBB163_2
-; RV32I-NEXT:  .LBB163_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB163_2 Depth=1
+; RV32I-NEXT:    j .LBB173_2
+; RV32I-NEXT:  .LBB173_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB173_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    li a3, 4
@@ -12654,16 +13290,16 @@ define i32 @atomicrmw_umin_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
-; RV32I-NEXT:    bnez a0, .LBB163_4
-; RV32I-NEXT:  .LBB163_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB173_4
+; RV32I-NEXT:  .LBB173_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bgeu s1, a3, .LBB163_1
+; RV32I-NEXT:    bgeu s1, a3, .LBB173_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB163_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB173_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB163_1
-; RV32I-NEXT:  .LBB163_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB173_1
+; RV32I-NEXT:  .LBB173_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -12687,9 +13323,9 @@ define i32 @atomicrmw_umin_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    lw a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    sext.w s2, a1
-; RV64I-NEXT:    j .LBB163_2
-; RV64I-NEXT:  .LBB163_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB163_2 Depth=1
+; RV64I-NEXT:    j .LBB173_2
+; RV64I-NEXT:  .LBB173_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB173_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    li a3, 4
@@ -12697,16 +13333,16 @@ define i32 @atomicrmw_umin_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
-; RV64I-NEXT:    bnez a0, .LBB163_4
-; RV64I-NEXT:  .LBB163_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB173_4
+; RV64I-NEXT:  .LBB173_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bgeu s2, a3, .LBB163_1
+; RV64I-NEXT:    bgeu s2, a3, .LBB173_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB163_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB173_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB163_1
-; RV64I-NEXT:  .LBB163_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB173_1
+; RV64I-NEXT:  .LBB173_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -12733,9 +13369,9 @@ define i32 @atomicrmw_umin_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    j .LBB164_2
-; RV32I-NEXT:  .LBB164_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB164_2 Depth=1
+; RV32I-NEXT:    j .LBB174_2
+; RV32I-NEXT:  .LBB174_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB174_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    li a3, 5
@@ -12743,16 +13379,16 @@ define i32 @atomicrmw_umin_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
-; RV32I-NEXT:    bnez a0, .LBB164_4
-; RV32I-NEXT:  .LBB164_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB174_4
+; RV32I-NEXT:  .LBB174_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bgeu s1, a3, .LBB164_1
+; RV32I-NEXT:    bgeu s1, a3, .LBB174_1
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB164_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB174_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
-; RV32I-NEXT:    j .LBB164_1
-; RV32I-NEXT:  .LBB164_4: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB174_1
+; RV32I-NEXT:  .LBB174_4: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -12775,10 +13411,10 @@ define i32 @atomicrmw_umin_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lw a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    sext.w s2, a1
-; RV64I-NEXT:    j .LBB164_2
-; RV64I-NEXT:  .LBB164_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB164_2 Depth=1
+; RV64I-NEXT:    sext.w s2, a1
+; RV64I-NEXT:    j .LBB174_2
+; RV64I-NEXT:  .LBB174_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB174_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    li a3, 5
@@ -12786,16 +13422,16 @@ define i32 @atomicrmw_umin_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
-; RV64I-NEXT:    bnez a0, .LBB164_4
-; RV64I-NEXT:  .LBB164_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB174_4
+; RV64I-NEXT:  .LBB174_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bgeu s2, a3, .LBB164_1
+; RV64I-NEXT:    bgeu s2, a3, .LBB174_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB164_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB174_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB164_1
-; RV64I-NEXT:  .LBB164_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB174_1
+; RV64I-NEXT:  .LBB174_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -13630,12 +14266,12 @@ define i64 @atomicrmw_nand_i64_monotonic(ptr %a, i64 %b) nounwind {
 ;
 ; RV64IA-LABEL: atomicrmw_nand_i64_monotonic:
 ; RV64IA:       # %bb.0:
-; RV64IA-NEXT:  .LBB185_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB195_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.d a2, (a0)
 ; RV64IA-NEXT:    and a3, a2, a1
 ; RV64IA-NEXT:    not a3, a3
 ; RV64IA-NEXT:    sc.d a3, a3, (a0)
-; RV64IA-NEXT:    bnez a3, .LBB185_1
+; RV64IA-NEXT:    bnez a3, .LBB195_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    mv a0, a2
 ; RV64IA-NEXT:    ret
@@ -13676,12 +14312,12 @@ define i64 @atomicrmw_nand_i64_acquire(ptr %a, i64 %b) nounwind {
 ;
 ; RV64IA-LABEL: atomicrmw_nand_i64_acquire:
 ; RV64IA:       # %bb.0:
-; RV64IA-NEXT:  .LBB186_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB196_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.d.aq a2, (a0)
 ; RV64IA-NEXT:    and a3, a2, a1
 ; RV64IA-NEXT:    not a3, a3
 ; RV64IA-NEXT:    sc.d a3, a3, (a0)
-; RV64IA-NEXT:    bnez a3, .LBB186_1
+; RV64IA-NEXT:    bnez a3, .LBB196_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    mv a0, a2
 ; RV64IA-NEXT:    ret
@@ -13722,12 +14358,12 @@ define i64 @atomicrmw_nand_i64_release(ptr %a, i64 %b) nounwind {
 ;
 ; RV64IA-LABEL: atomicrmw_nand_i64_release:
 ; RV64IA:       # %bb.0:
-; RV64IA-NEXT:  .LBB187_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB197_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.d a2, (a0)
 ; RV64IA-NEXT:    and a3, a2, a1
 ; RV64IA-NEXT:    not a3, a3
 ; RV64IA-NEXT:    sc.d.rl a3, a3, (a0)
-; RV64IA-NEXT:    bnez a3, .LBB187_1
+; RV64IA-NEXT:    bnez a3, .LBB197_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    mv a0, a2
 ; RV64IA-NEXT:    ret
@@ -13768,12 +14404,12 @@ define i64 @atomicrmw_nand_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ;
 ; RV64IA-LABEL: atomicrmw_nand_i64_acq_rel:
 ; RV64IA:       # %bb.0:
-; RV64IA-NEXT:  .LBB188_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB198_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.d.aq a2, (a0)
 ; RV64IA-NEXT:    and a3, a2, a1
 ; RV64IA-NEXT:    not a3, a3
 ; RV64IA-NEXT:    sc.d.rl a3, a3, (a0)
-; RV64IA-NEXT:    bnez a3, .LBB188_1
+; RV64IA-NEXT:    bnez a3, .LBB198_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    mv a0, a2
 ; RV64IA-NEXT:    ret
@@ -13814,12 +14450,12 @@ define i64 @atomicrmw_nand_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ;
 ; RV64IA-LABEL: atomicrmw_nand_i64_seq_cst:
 ; RV64IA:       # %bb.0:
-; RV64IA-NEXT:  .LBB189_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT:  .LBB199_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.d.aqrl a2, (a0)
 ; RV64IA-NEXT:    and a3, a2, a1
 ; RV64IA-NEXT:    not a3, a3
 ; RV64IA-NEXT:    sc.d.rl a3, a3, (a0)
-; RV64IA-NEXT:    bnez a3, .LBB189_1
+; RV64IA-NEXT:    bnez a3, .LBB199_1
 ; RV64IA-NEXT:  # %bb.2:
 ; RV64IA-NEXT:    mv a0, a2
 ; RV64IA-NEXT:    ret
@@ -14230,9 +14866,9 @@ define i64 @atomicrmw_max_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a4, 0(a0)
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
-; RV32I-NEXT:    j .LBB200_2
-; RV32I-NEXT:  .LBB200_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB200_2 Depth=1
+; RV32I-NEXT:    j .LBB210_2
+; RV32I-NEXT:  .LBB210_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB210_2 Depth=1
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
@@ -14242,27 +14878,27 @@ define i64 @atomicrmw_max_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
-; RV32I-NEXT:    bnez a0, .LBB200_7
-; RV32I-NEXT:  .LBB200_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB210_7
+; RV32I-NEXT:  .LBB210_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT:    beq a5, s1, .LBB200_4
+; RV32I-NEXT:    beq a5, s1, .LBB210_4
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB200_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB210_2 Depth=1
 ; RV32I-NEXT:    slt a0, s1, a5
-; RV32I-NEXT:    j .LBB200_5
-; RV32I-NEXT:  .LBB200_4: # in Loop: Header=BB200_2 Depth=1
+; RV32I-NEXT:    j .LBB210_5
+; RV32I-NEXT:  .LBB210_4: # in Loop: Header=BB210_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s2, a4
-; RV32I-NEXT:  .LBB200_5: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB200_2 Depth=1
+; RV32I-NEXT:  .LBB210_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB210_2 Depth=1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
-; RV32I-NEXT:    bnez a0, .LBB200_1
+; RV32I-NEXT:    bnez a0, .LBB210_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB200_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB210_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
-; RV32I-NEXT:    j .LBB200_1
-; RV32I-NEXT:  .LBB200_7: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB210_1
+; RV32I-NEXT:  .LBB210_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -14284,9 +14920,9 @@ define i64 @atomicrmw_max_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a4, 0(a0)
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
-; RV32IA-NEXT:    j .LBB200_2
-; RV32IA-NEXT:  .LBB200_1: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB200_2 Depth=1
+; RV32IA-NEXT:    j .LBB210_2
+; RV32IA-NEXT:  .LBB210_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB210_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
@@ -14296,27 +14932,27 @@ define i64 @atomicrmw_max_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
-; RV32IA-NEXT:    bnez a0, .LBB200_7
-; RV32IA-NEXT:  .LBB200_2: # %atomicrmw.start
+; RV32IA-NEXT:    bnez a0, .LBB210_7
+; RV32IA-NEXT:  .LBB210_2: # %atomicrmw.start
 ; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    beq a5, s1, .LBB200_4
+; RV32IA-NEXT:    beq a5, s1, .LBB210_4
 ; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB200_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB210_2 Depth=1
 ; RV32IA-NEXT:    slt a0, s1, a5
-; RV32IA-NEXT:    j .LBB200_5
-; RV32IA-NEXT:  .LBB200_4: # in Loop: Header=BB200_2 Depth=1
+; RV32IA-NEXT:    j .LBB210_5
+; RV32IA-NEXT:  .LBB210_4: # in Loop: Header=BB210_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s2, a4
-; RV32IA-NEXT:  .LBB200_5: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB200_2 Depth=1
+; RV32IA-NEXT:  .LBB210_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB210_2 Depth=1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
-; RV32IA-NEXT:    bnez a0, .LBB200_1
+; RV32IA-NEXT:    bnez a0, .LBB210_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB200_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB210_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
-; RV32IA-NEXT:    j .LBB200_1
-; RV32IA-NEXT:  .LBB200_7: # %atomicrmw.end
+; RV32IA-NEXT:    j .LBB210_1
+; RV32IA-NEXT:  .LBB210_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
 ; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -14335,9 +14971,9 @@ define i64 @atomicrmw_max_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    j .LBB200_2
-; RV64I-NEXT:  .LBB200_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB200_2 Depth=1
+; RV64I-NEXT:    j .LBB210_2
+; RV64I-NEXT:  .LBB210_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB210_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    mv a0, s0
@@ -14345,16 +14981,16 @@ define i64 @atomicrmw_max_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
-; RV64I-NEXT:    bnez a0, .LBB200_4
-; RV64I-NEXT:  .LBB200_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB210_4
+; RV64I-NEXT:  .LBB210_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    blt s1, a3, .LBB200_1
+; RV64I-NEXT:    blt s1, a3, .LBB210_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB200_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB210_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB200_1
-; RV64I-NEXT:  .LBB200_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB210_1
+; RV64I-NEXT:  .LBB210_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -14383,9 +15019,9 @@ define i64 @atomicrmw_max_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a4, 0(a0)
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
-; RV32I-NEXT:    j .LBB201_2
-; RV32I-NEXT:  .LBB201_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB201_2 Depth=1
+; RV32I-NEXT:    j .LBB211_2
+; RV32I-NEXT:  .LBB211_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB211_2 Depth=1
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
@@ -14395,27 +15031,27 @@ define i64 @atomicrmw_max_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
-; RV32I-NEXT:    bnez a0, .LBB201_7
-; RV32I-NEXT:  .LBB201_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB211_7
+; RV32I-NEXT:  .LBB211_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT:    beq a5, s1, .LBB201_4
+; RV32I-NEXT:    beq a5, s1, .LBB211_4
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB201_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB211_2 Depth=1
 ; RV32I-NEXT:    slt a0, s1, a5
-; RV32I-NEXT:    j .LBB201_5
-; RV32I-NEXT:  .LBB201_4: # in Loop: Header=BB201_2 Depth=1
+; RV32I-NEXT:    j .LBB211_5
+; RV32I-NEXT:  .LBB211_4: # in Loop: Header=BB211_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s2, a4
-; RV32I-NEXT:  .LBB201_5: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB201_2 Depth=1
+; RV32I-NEXT:  .LBB211_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB211_2 Depth=1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
-; RV32I-NEXT:    bnez a0, .LBB201_1
+; RV32I-NEXT:    bnez a0, .LBB211_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB201_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB211_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
-; RV32I-NEXT:    j .LBB201_1
-; RV32I-NEXT:  .LBB201_7: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB211_1
+; RV32I-NEXT:  .LBB211_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -14437,9 +15073,9 @@ define i64 @atomicrmw_max_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a4, 0(a0)
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
-; RV32IA-NEXT:    j .LBB201_2
-; RV32IA-NEXT:  .LBB201_1: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB201_2 Depth=1
+; RV32IA-NEXT:    j .LBB211_2
+; RV32IA-NEXT:  .LBB211_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB211_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
@@ -14449,27 +15085,27 @@ define i64 @atomicrmw_max_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
-; RV32IA-NEXT:    bnez a0, .LBB201_7
-; RV32IA-NEXT:  .LBB201_2: # %atomicrmw.start
+; RV32IA-NEXT:    bnez a0, .LBB211_7
+; RV32IA-NEXT:  .LBB211_2: # %atomicrmw.start
 ; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    beq a5, s1, .LBB201_4
+; RV32IA-NEXT:    beq a5, s1, .LBB211_4
 ; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB201_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB211_2 Depth=1
 ; RV32IA-NEXT:    slt a0, s1, a5
-; RV32IA-NEXT:    j .LBB201_5
-; RV32IA-NEXT:  .LBB201_4: # in Loop: Header=BB201_2 Depth=1
+; RV32IA-NEXT:    j .LBB211_5
+; RV32IA-NEXT:  .LBB211_4: # in Loop: Header=BB211_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s2, a4
-; RV32IA-NEXT:  .LBB201_5: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB201_2 Depth=1
+; RV32IA-NEXT:  .LBB211_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB211_2 Depth=1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
-; RV32IA-NEXT:    bnez a0, .LBB201_1
+; RV32IA-NEXT:    bnez a0, .LBB211_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB201_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB211_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
-; RV32IA-NEXT:    j .LBB201_1
-; RV32IA-NEXT:  .LBB201_7: # %atomicrmw.end
+; RV32IA-NEXT:    j .LBB211_1
+; RV32IA-NEXT:  .LBB211_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
 ; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -14488,9 +15124,9 @@ define i64 @atomicrmw_max_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    j .LBB201_2
-; RV64I-NEXT:  .LBB201_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB201_2 Depth=1
+; RV64I-NEXT:    j .LBB211_2
+; RV64I-NEXT:  .LBB211_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB211_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    li a3, 2
@@ -14498,16 +15134,16 @@ define i64 @atomicrmw_max_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
-; RV64I-NEXT:    bnez a0, .LBB201_4
-; RV64I-NEXT:  .LBB201_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB211_4
+; RV64I-NEXT:  .LBB211_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    blt s1, a3, .LBB201_1
+; RV64I-NEXT:    blt s1, a3, .LBB211_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB201_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB211_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB201_1
-; RV64I-NEXT:  .LBB201_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB211_1
+; RV64I-NEXT:  .LBB211_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -14536,9 +15172,9 @@ define i64 @atomicrmw_max_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a4, 0(a0)
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
-; RV32I-NEXT:    j .LBB202_2
-; RV32I-NEXT:  .LBB202_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB202_2 Depth=1
+; RV32I-NEXT:    j .LBB212_2
+; RV32I-NEXT:  .LBB212_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB212_2 Depth=1
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
@@ -14548,27 +15184,27 @@ define i64 @atomicrmw_max_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
-; RV32I-NEXT:    bnez a0, .LBB202_7
-; RV32I-NEXT:  .LBB202_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB212_7
+; RV32I-NEXT:  .LBB212_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT:    beq a5, s1, .LBB202_4
+; RV32I-NEXT:    beq a5, s1, .LBB212_4
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB202_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB212_2 Depth=1
 ; RV32I-NEXT:    slt a0, s1, a5
-; RV32I-NEXT:    j .LBB202_5
-; RV32I-NEXT:  .LBB202_4: # in Loop: Header=BB202_2 Depth=1
+; RV32I-NEXT:    j .LBB212_5
+; RV32I-NEXT:  .LBB212_4: # in Loop: Header=BB212_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s2, a4
-; RV32I-NEXT:  .LBB202_5: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB202_2 Depth=1
+; RV32I-NEXT:  .LBB212_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB212_2 Depth=1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
-; RV32I-NEXT:    bnez a0, .LBB202_1
+; RV32I-NEXT:    bnez a0, .LBB212_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB202_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB212_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
-; RV32I-NEXT:    j .LBB202_1
-; RV32I-NEXT:  .LBB202_7: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB212_1
+; RV32I-NEXT:  .LBB212_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -14590,9 +15226,9 @@ define i64 @atomicrmw_max_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a4, 0(a0)
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
-; RV32IA-NEXT:    j .LBB202_2
-; RV32IA-NEXT:  .LBB202_1: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB202_2 Depth=1
+; RV32IA-NEXT:    j .LBB212_2
+; RV32IA-NEXT:  .LBB212_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB212_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
@@ -14602,27 +15238,27 @@ define i64 @atomicrmw_max_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
-; RV32IA-NEXT:    bnez a0, .LBB202_7
-; RV32IA-NEXT:  .LBB202_2: # %atomicrmw.start
+; RV32IA-NEXT:    bnez a0, .LBB212_7
+; RV32IA-NEXT:  .LBB212_2: # %atomicrmw.start
 ; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    beq a5, s1, .LBB202_4
+; RV32IA-NEXT:    beq a5, s1, .LBB212_4
 ; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB202_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB212_2 Depth=1
 ; RV32IA-NEXT:    slt a0, s1, a5
-; RV32IA-NEXT:    j .LBB202_5
-; RV32IA-NEXT:  .LBB202_4: # in Loop: Header=BB202_2 Depth=1
+; RV32IA-NEXT:    j .LBB212_5
+; RV32IA-NEXT:  .LBB212_4: # in Loop: Header=BB212_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s2, a4
-; RV32IA-NEXT:  .LBB202_5: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB202_2 Depth=1
+; RV32IA-NEXT:  .LBB212_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB212_2 Depth=1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
-; RV32IA-NEXT:    bnez a0, .LBB202_1
+; RV32IA-NEXT:    bnez a0, .LBB212_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB202_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB212_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
-; RV32IA-NEXT:    j .LBB202_1
-; RV32IA-NEXT:  .LBB202_7: # %atomicrmw.end
+; RV32IA-NEXT:    j .LBB212_1
+; RV32IA-NEXT:  .LBB212_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
 ; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -14641,9 +15277,9 @@ define i64 @atomicrmw_max_i64_release(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    j .LBB202_2
-; RV64I-NEXT:  .LBB202_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB202_2 Depth=1
+; RV64I-NEXT:    j .LBB212_2
+; RV64I-NEXT:  .LBB212_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB212_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    li a3, 3
@@ -14651,16 +15287,16 @@ define i64 @atomicrmw_max_i64_release(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
-; RV64I-NEXT:    bnez a0, .LBB202_4
-; RV64I-NEXT:  .LBB202_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB212_4
+; RV64I-NEXT:  .LBB212_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    blt s1, a3, .LBB202_1
+; RV64I-NEXT:    blt s1, a3, .LBB212_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB202_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB212_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB202_1
-; RV64I-NEXT:  .LBB202_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB212_1
+; RV64I-NEXT:  .LBB212_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -14689,9 +15325,9 @@ define i64 @atomicrmw_max_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a4, 0(a0)
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
-; RV32I-NEXT:    j .LBB203_2
-; RV32I-NEXT:  .LBB203_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB203_2 Depth=1
+; RV32I-NEXT:    j .LBB213_2
+; RV32I-NEXT:  .LBB213_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB213_2 Depth=1
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
@@ -14701,27 +15337,27 @@ define i64 @atomicrmw_max_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
-; RV32I-NEXT:    bnez a0, .LBB203_7
-; RV32I-NEXT:  .LBB203_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB213_7
+; RV32I-NEXT:  .LBB213_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT:    beq a5, s1, .LBB203_4
+; RV32I-NEXT:    beq a5, s1, .LBB213_4
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB203_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB213_2 Depth=1
 ; RV32I-NEXT:    slt a0, s1, a5
-; RV32I-NEXT:    j .LBB203_5
-; RV32I-NEXT:  .LBB203_4: # in Loop: Header=BB203_2 Depth=1
+; RV32I-NEXT:    j .LBB213_5
+; RV32I-NEXT:  .LBB213_4: # in Loop: Header=BB213_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s2, a4
-; RV32I-NEXT:  .LBB203_5: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB203_2 Depth=1
+; RV32I-NEXT:  .LBB213_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB213_2 Depth=1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
-; RV32I-NEXT:    bnez a0, .LBB203_1
+; RV32I-NEXT:    bnez a0, .LBB213_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB203_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB213_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
-; RV32I-NEXT:    j .LBB203_1
-; RV32I-NEXT:  .LBB203_7: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB213_1
+; RV32I-NEXT:  .LBB213_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -14743,9 +15379,9 @@ define i64 @atomicrmw_max_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a4, 0(a0)
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
-; RV32IA-NEXT:    j .LBB203_2
-; RV32IA-NEXT:  .LBB203_1: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB203_2 Depth=1
+; RV32IA-NEXT:    j .LBB213_2
+; RV32IA-NEXT:  .LBB213_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB213_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
@@ -14755,27 +15391,27 @@ define i64 @atomicrmw_max_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
-; RV32IA-NEXT:    bnez a0, .LBB203_7
-; RV32IA-NEXT:  .LBB203_2: # %atomicrmw.start
+; RV32IA-NEXT:    bnez a0, .LBB213_7
+; RV32IA-NEXT:  .LBB213_2: # %atomicrmw.start
 ; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    beq a5, s1, .LBB203_4
+; RV32IA-NEXT:    beq a5, s1, .LBB213_4
 ; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB203_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB213_2 Depth=1
 ; RV32IA-NEXT:    slt a0, s1, a5
-; RV32IA-NEXT:    j .LBB203_5
-; RV32IA-NEXT:  .LBB203_4: # in Loop: Header=BB203_2 Depth=1
+; RV32IA-NEXT:    j .LBB213_5
+; RV32IA-NEXT:  .LBB213_4: # in Loop: Header=BB213_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s2, a4
-; RV32IA-NEXT:  .LBB203_5: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB203_2 Depth=1
+; RV32IA-NEXT:  .LBB213_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB213_2 Depth=1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
-; RV32IA-NEXT:    bnez a0, .LBB203_1
+; RV32IA-NEXT:    bnez a0, .LBB213_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB203_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB213_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
-; RV32IA-NEXT:    j .LBB203_1
-; RV32IA-NEXT:  .LBB203_7: # %atomicrmw.end
+; RV32IA-NEXT:    j .LBB213_1
+; RV32IA-NEXT:  .LBB213_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
 ; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -14794,9 +15430,9 @@ define i64 @atomicrmw_max_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    j .LBB203_2
-; RV64I-NEXT:  .LBB203_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB203_2 Depth=1
+; RV64I-NEXT:    j .LBB213_2
+; RV64I-NEXT:  .LBB213_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB213_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    li a3, 4
@@ -14804,16 +15440,16 @@ define i64 @atomicrmw_max_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
-; RV64I-NEXT:    bnez a0, .LBB203_4
-; RV64I-NEXT:  .LBB203_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB213_4
+; RV64I-NEXT:  .LBB213_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    blt s1, a3, .LBB203_1
+; RV64I-NEXT:    blt s1, a3, .LBB213_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB203_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB213_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB203_1
-; RV64I-NEXT:  .LBB203_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB213_1
+; RV64I-NEXT:  .LBB213_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -14842,9 +15478,9 @@ define i64 @atomicrmw_max_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a4, 0(a0)
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
-; RV32I-NEXT:    j .LBB204_2
-; RV32I-NEXT:  .LBB204_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB204_2 Depth=1
+; RV32I-NEXT:    j .LBB214_2
+; RV32I-NEXT:  .LBB214_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB214_2 Depth=1
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
@@ -14854,27 +15490,27 @@ define i64 @atomicrmw_max_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
-; RV32I-NEXT:    bnez a0, .LBB204_7
-; RV32I-NEXT:  .LBB204_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB214_7
+; RV32I-NEXT:  .LBB214_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT:    beq a5, s1, .LBB204_4
+; RV32I-NEXT:    beq a5, s1, .LBB214_4
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB204_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB214_2 Depth=1
 ; RV32I-NEXT:    slt a0, s1, a5
-; RV32I-NEXT:    j .LBB204_5
-; RV32I-NEXT:  .LBB204_4: # in Loop: Header=BB204_2 Depth=1
+; RV32I-NEXT:    j .LBB214_5
+; RV32I-NEXT:  .LBB214_4: # in Loop: Header=BB214_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s2, a4
-; RV32I-NEXT:  .LBB204_5: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB204_2 Depth=1
+; RV32I-NEXT:  .LBB214_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB214_2 Depth=1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
-; RV32I-NEXT:    bnez a0, .LBB204_1
+; RV32I-NEXT:    bnez a0, .LBB214_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB204_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB214_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
-; RV32I-NEXT:    j .LBB204_1
-; RV32I-NEXT:  .LBB204_7: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB214_1
+; RV32I-NEXT:  .LBB214_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -14896,9 +15532,9 @@ define i64 @atomicrmw_max_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a4, 0(a0)
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
-; RV32IA-NEXT:    j .LBB204_2
-; RV32IA-NEXT:  .LBB204_1: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB204_2 Depth=1
+; RV32IA-NEXT:    j .LBB214_2
+; RV32IA-NEXT:  .LBB214_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB214_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
@@ -14908,27 +15544,27 @@ define i64 @atomicrmw_max_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
-; RV32IA-NEXT:    bnez a0, .LBB204_7
-; RV32IA-NEXT:  .LBB204_2: # %atomicrmw.start
+; RV32IA-NEXT:    bnez a0, .LBB214_7
+; RV32IA-NEXT:  .LBB214_2: # %atomicrmw.start
 ; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    beq a5, s1, .LBB204_4
+; RV32IA-NEXT:    beq a5, s1, .LBB214_4
 ; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB204_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB214_2 Depth=1
 ; RV32IA-NEXT:    slt a0, s1, a5
-; RV32IA-NEXT:    j .LBB204_5
-; RV32IA-NEXT:  .LBB204_4: # in Loop: Header=BB204_2 Depth=1
+; RV32IA-NEXT:    j .LBB214_5
+; RV32IA-NEXT:  .LBB214_4: # in Loop: Header=BB214_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s2, a4
-; RV32IA-NEXT:  .LBB204_5: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB204_2 Depth=1
+; RV32IA-NEXT:  .LBB214_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB214_2 Depth=1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
-; RV32IA-NEXT:    bnez a0, .LBB204_1
+; RV32IA-NEXT:    bnez a0, .LBB214_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB204_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB214_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
-; RV32IA-NEXT:    j .LBB204_1
-; RV32IA-NEXT:  .LBB204_7: # %atomicrmw.end
+; RV32IA-NEXT:    j .LBB214_1
+; RV32IA-NEXT:  .LBB214_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
 ; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -14947,9 +15583,9 @@ define i64 @atomicrmw_max_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    j .LBB204_2
-; RV64I-NEXT:  .LBB204_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB204_2 Depth=1
+; RV64I-NEXT:    j .LBB214_2
+; RV64I-NEXT:  .LBB214_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB214_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    li a3, 5
@@ -14957,16 +15593,16 @@ define i64 @atomicrmw_max_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
-; RV64I-NEXT:    bnez a0, .LBB204_4
-; RV64I-NEXT:  .LBB204_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB214_4
+; RV64I-NEXT:  .LBB214_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    blt s1, a3, .LBB204_1
+; RV64I-NEXT:    blt s1, a3, .LBB214_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB204_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB214_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB204_1
-; RV64I-NEXT:  .LBB204_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB214_1
+; RV64I-NEXT:  .LBB214_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -14995,9 +15631,9 @@ define i64 @atomicrmw_min_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a4, 0(a0)
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
-; RV32I-NEXT:    j .LBB205_2
-; RV32I-NEXT:  .LBB205_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB205_2 Depth=1
+; RV32I-NEXT:    j .LBB215_2
+; RV32I-NEXT:  .LBB215_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB215_2 Depth=1
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
@@ -15007,27 +15643,27 @@ define i64 @atomicrmw_min_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
-; RV32I-NEXT:    bnez a0, .LBB205_7
-; RV32I-NEXT:  .LBB205_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB215_7
+; RV32I-NEXT:  .LBB215_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT:    beq a5, s1, .LBB205_4
+; RV32I-NEXT:    beq a5, s1, .LBB215_4
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB205_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB215_2 Depth=1
 ; RV32I-NEXT:    slt a0, s1, a5
-; RV32I-NEXT:    j .LBB205_5
-; RV32I-NEXT:  .LBB205_4: # in Loop: Header=BB205_2 Depth=1
+; RV32I-NEXT:    j .LBB215_5
+; RV32I-NEXT:  .LBB215_4: # in Loop: Header=BB215_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s2, a4
-; RV32I-NEXT:  .LBB205_5: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB205_2 Depth=1
+; RV32I-NEXT:  .LBB215_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB215_2 Depth=1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
-; RV32I-NEXT:    beqz a0, .LBB205_1
+; RV32I-NEXT:    beqz a0, .LBB215_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB205_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB215_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
-; RV32I-NEXT:    j .LBB205_1
-; RV32I-NEXT:  .LBB205_7: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB215_1
+; RV32I-NEXT:  .LBB215_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -15049,9 +15685,9 @@ define i64 @atomicrmw_min_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a4, 0(a0)
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
-; RV32IA-NEXT:    j .LBB205_2
-; RV32IA-NEXT:  .LBB205_1: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB205_2 Depth=1
+; RV32IA-NEXT:    j .LBB215_2
+; RV32IA-NEXT:  .LBB215_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB215_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
@@ -15061,27 +15697,27 @@ define i64 @atomicrmw_min_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
-; RV32IA-NEXT:    bnez a0, .LBB205_7
-; RV32IA-NEXT:  .LBB205_2: # %atomicrmw.start
+; RV32IA-NEXT:    bnez a0, .LBB215_7
+; RV32IA-NEXT:  .LBB215_2: # %atomicrmw.start
 ; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    beq a5, s1, .LBB205_4
+; RV32IA-NEXT:    beq a5, s1, .LBB215_4
 ; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB205_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB215_2 Depth=1
 ; RV32IA-NEXT:    slt a0, s1, a5
-; RV32IA-NEXT:    j .LBB205_5
-; RV32IA-NEXT:  .LBB205_4: # in Loop: Header=BB205_2 Depth=1
+; RV32IA-NEXT:    j .LBB215_5
+; RV32IA-NEXT:  .LBB215_4: # in Loop: Header=BB215_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s2, a4
-; RV32IA-NEXT:  .LBB205_5: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB205_2 Depth=1
+; RV32IA-NEXT:  .LBB215_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB215_2 Depth=1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
-; RV32IA-NEXT:    beqz a0, .LBB205_1
+; RV32IA-NEXT:    beqz a0, .LBB215_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB205_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB215_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
-; RV32IA-NEXT:    j .LBB205_1
-; RV32IA-NEXT:  .LBB205_7: # %atomicrmw.end
+; RV32IA-NEXT:    j .LBB215_1
+; RV32IA-NEXT:  .LBB215_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
 ; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -15100,9 +15736,9 @@ define i64 @atomicrmw_min_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    j .LBB205_2
-; RV64I-NEXT:  .LBB205_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB205_2 Depth=1
+; RV64I-NEXT:    j .LBB215_2
+; RV64I-NEXT:  .LBB215_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB215_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    mv a0, s0
@@ -15110,16 +15746,16 @@ define i64 @atomicrmw_min_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
-; RV64I-NEXT:    bnez a0, .LBB205_4
-; RV64I-NEXT:  .LBB205_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB215_4
+; RV64I-NEXT:  .LBB215_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bge s1, a3, .LBB205_1
+; RV64I-NEXT:    bge s1, a3, .LBB215_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB205_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB215_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB205_1
-; RV64I-NEXT:  .LBB205_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB215_1
+; RV64I-NEXT:  .LBB215_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -15148,9 +15784,9 @@ define i64 @atomicrmw_min_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a4, 0(a0)
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
-; RV32I-NEXT:    j .LBB206_2
-; RV32I-NEXT:  .LBB206_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB206_2 Depth=1
+; RV32I-NEXT:    j .LBB216_2
+; RV32I-NEXT:  .LBB216_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB216_2 Depth=1
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
@@ -15160,27 +15796,27 @@ define i64 @atomicrmw_min_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
-; RV32I-NEXT:    bnez a0, .LBB206_7
-; RV32I-NEXT:  .LBB206_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB216_7
+; RV32I-NEXT:  .LBB216_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT:    beq a5, s1, .LBB206_4
+; RV32I-NEXT:    beq a5, s1, .LBB216_4
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB206_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB216_2 Depth=1
 ; RV32I-NEXT:    slt a0, s1, a5
-; RV32I-NEXT:    j .LBB206_5
-; RV32I-NEXT:  .LBB206_4: # in Loop: Header=BB206_2 Depth=1
+; RV32I-NEXT:    j .LBB216_5
+; RV32I-NEXT:  .LBB216_4: # in Loop: Header=BB216_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s2, a4
-; RV32I-NEXT:  .LBB206_5: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB206_2 Depth=1
+; RV32I-NEXT:  .LBB216_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB216_2 Depth=1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
-; RV32I-NEXT:    beqz a0, .LBB206_1
+; RV32I-NEXT:    beqz a0, .LBB216_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB206_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB216_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
-; RV32I-NEXT:    j .LBB206_1
-; RV32I-NEXT:  .LBB206_7: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB216_1
+; RV32I-NEXT:  .LBB216_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -15202,9 +15838,9 @@ define i64 @atomicrmw_min_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a4, 0(a0)
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
-; RV32IA-NEXT:    j .LBB206_2
-; RV32IA-NEXT:  .LBB206_1: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB206_2 Depth=1
+; RV32IA-NEXT:    j .LBB216_2
+; RV32IA-NEXT:  .LBB216_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB216_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
@@ -15214,27 +15850,27 @@ define i64 @atomicrmw_min_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
-; RV32IA-NEXT:    bnez a0, .LBB206_7
-; RV32IA-NEXT:  .LBB206_2: # %atomicrmw.start
+; RV32IA-NEXT:    bnez a0, .LBB216_7
+; RV32IA-NEXT:  .LBB216_2: # %atomicrmw.start
 ; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    beq a5, s1, .LBB206_4
+; RV32IA-NEXT:    beq a5, s1, .LBB216_4
 ; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB206_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB216_2 Depth=1
 ; RV32IA-NEXT:    slt a0, s1, a5
-; RV32IA-NEXT:    j .LBB206_5
-; RV32IA-NEXT:  .LBB206_4: # in Loop: Header=BB206_2 Depth=1
+; RV32IA-NEXT:    j .LBB216_5
+; RV32IA-NEXT:  .LBB216_4: # in Loop: Header=BB216_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s2, a4
-; RV32IA-NEXT:  .LBB206_5: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB206_2 Depth=1
+; RV32IA-NEXT:  .LBB216_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB216_2 Depth=1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
-; RV32IA-NEXT:    beqz a0, .LBB206_1
+; RV32IA-NEXT:    beqz a0, .LBB216_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB206_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB216_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
-; RV32IA-NEXT:    j .LBB206_1
-; RV32IA-NEXT:  .LBB206_7: # %atomicrmw.end
+; RV32IA-NEXT:    j .LBB216_1
+; RV32IA-NEXT:  .LBB216_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
 ; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -15253,9 +15889,9 @@ define i64 @atomicrmw_min_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    j .LBB206_2
-; RV64I-NEXT:  .LBB206_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB206_2 Depth=1
+; RV64I-NEXT:    j .LBB216_2
+; RV64I-NEXT:  .LBB216_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB216_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    li a3, 2
@@ -15263,16 +15899,16 @@ define i64 @atomicrmw_min_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
-; RV64I-NEXT:    bnez a0, .LBB206_4
-; RV64I-NEXT:  .LBB206_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB216_4
+; RV64I-NEXT:  .LBB216_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bge s1, a3, .LBB206_1
+; RV64I-NEXT:    bge s1, a3, .LBB216_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB206_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB216_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB206_1
-; RV64I-NEXT:  .LBB206_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB216_1
+; RV64I-NEXT:  .LBB216_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -15301,9 +15937,9 @@ define i64 @atomicrmw_min_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a4, 0(a0)
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
-; RV32I-NEXT:    j .LBB207_2
-; RV32I-NEXT:  .LBB207_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB207_2 Depth=1
+; RV32I-NEXT:    j .LBB217_2
+; RV32I-NEXT:  .LBB217_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB217_2 Depth=1
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
@@ -15313,27 +15949,27 @@ define i64 @atomicrmw_min_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
-; RV32I-NEXT:    bnez a0, .LBB207_7
-; RV32I-NEXT:  .LBB207_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB217_7
+; RV32I-NEXT:  .LBB217_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT:    beq a5, s1, .LBB207_4
+; RV32I-NEXT:    beq a5, s1, .LBB217_4
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB207_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB217_2 Depth=1
 ; RV32I-NEXT:    slt a0, s1, a5
-; RV32I-NEXT:    j .LBB207_5
-; RV32I-NEXT:  .LBB207_4: # in Loop: Header=BB207_2 Depth=1
+; RV32I-NEXT:    j .LBB217_5
+; RV32I-NEXT:  .LBB217_4: # in Loop: Header=BB217_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s2, a4
-; RV32I-NEXT:  .LBB207_5: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB207_2 Depth=1
+; RV32I-NEXT:  .LBB217_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB217_2 Depth=1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
-; RV32I-NEXT:    beqz a0, .LBB207_1
+; RV32I-NEXT:    beqz a0, .LBB217_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB207_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB217_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
-; RV32I-NEXT:    j .LBB207_1
-; RV32I-NEXT:  .LBB207_7: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB217_1
+; RV32I-NEXT:  .LBB217_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -15355,9 +15991,9 @@ define i64 @atomicrmw_min_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a4, 0(a0)
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
-; RV32IA-NEXT:    j .LBB207_2
-; RV32IA-NEXT:  .LBB207_1: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB207_2 Depth=1
+; RV32IA-NEXT:    j .LBB217_2
+; RV32IA-NEXT:  .LBB217_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB217_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
@@ -15367,27 +16003,27 @@ define i64 @atomicrmw_min_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
-; RV32IA-NEXT:    bnez a0, .LBB207_7
-; RV32IA-NEXT:  .LBB207_2: # %atomicrmw.start
+; RV32IA-NEXT:    bnez a0, .LBB217_7
+; RV32IA-NEXT:  .LBB217_2: # %atomicrmw.start
 ; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    beq a5, s1, .LBB207_4
+; RV32IA-NEXT:    beq a5, s1, .LBB217_4
 ; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB207_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB217_2 Depth=1
 ; RV32IA-NEXT:    slt a0, s1, a5
-; RV32IA-NEXT:    j .LBB207_5
-; RV32IA-NEXT:  .LBB207_4: # in Loop: Header=BB207_2 Depth=1
+; RV32IA-NEXT:    j .LBB217_5
+; RV32IA-NEXT:  .LBB217_4: # in Loop: Header=BB217_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s2, a4
-; RV32IA-NEXT:  .LBB207_5: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB207_2 Depth=1
+; RV32IA-NEXT:  .LBB217_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB217_2 Depth=1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
-; RV32IA-NEXT:    beqz a0, .LBB207_1
+; RV32IA-NEXT:    beqz a0, .LBB217_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB207_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB217_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
-; RV32IA-NEXT:    j .LBB207_1
-; RV32IA-NEXT:  .LBB207_7: # %atomicrmw.end
+; RV32IA-NEXT:    j .LBB217_1
+; RV32IA-NEXT:  .LBB217_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
 ; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -15406,9 +16042,9 @@ define i64 @atomicrmw_min_i64_release(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    j .LBB207_2
-; RV64I-NEXT:  .LBB207_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB207_2 Depth=1
+; RV64I-NEXT:    j .LBB217_2
+; RV64I-NEXT:  .LBB217_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB217_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    li a3, 3
@@ -15416,16 +16052,16 @@ define i64 @atomicrmw_min_i64_release(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
-; RV64I-NEXT:    bnez a0, .LBB207_4
-; RV64I-NEXT:  .LBB207_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB217_4
+; RV64I-NEXT:  .LBB217_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bge s1, a3, .LBB207_1
+; RV64I-NEXT:    bge s1, a3, .LBB217_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB207_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB217_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB207_1
-; RV64I-NEXT:  .LBB207_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB217_1
+; RV64I-NEXT:  .LBB217_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -15454,9 +16090,9 @@ define i64 @atomicrmw_min_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a4, 0(a0)
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
-; RV32I-NEXT:    j .LBB208_2
-; RV32I-NEXT:  .LBB208_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB208_2 Depth=1
+; RV32I-NEXT:    j .LBB218_2
+; RV32I-NEXT:  .LBB218_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB218_2 Depth=1
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
@@ -15466,27 +16102,27 @@ define i64 @atomicrmw_min_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
-; RV32I-NEXT:    bnez a0, .LBB208_7
-; RV32I-NEXT:  .LBB208_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB218_7
+; RV32I-NEXT:  .LBB218_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT:    beq a5, s1, .LBB208_4
+; RV32I-NEXT:    beq a5, s1, .LBB218_4
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB208_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB218_2 Depth=1
 ; RV32I-NEXT:    slt a0, s1, a5
-; RV32I-NEXT:    j .LBB208_5
-; RV32I-NEXT:  .LBB208_4: # in Loop: Header=BB208_2 Depth=1
+; RV32I-NEXT:    j .LBB218_5
+; RV32I-NEXT:  .LBB218_4: # in Loop: Header=BB218_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s2, a4
-; RV32I-NEXT:  .LBB208_5: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB208_2 Depth=1
+; RV32I-NEXT:  .LBB218_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB218_2 Depth=1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
-; RV32I-NEXT:    beqz a0, .LBB208_1
+; RV32I-NEXT:    beqz a0, .LBB218_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB208_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB218_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
-; RV32I-NEXT:    j .LBB208_1
-; RV32I-NEXT:  .LBB208_7: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB218_1
+; RV32I-NEXT:  .LBB218_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -15508,9 +16144,9 @@ define i64 @atomicrmw_min_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a4, 0(a0)
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
-; RV32IA-NEXT:    j .LBB208_2
-; RV32IA-NEXT:  .LBB208_1: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB208_2 Depth=1
+; RV32IA-NEXT:    j .LBB218_2
+; RV32IA-NEXT:  .LBB218_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB218_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
@@ -15520,27 +16156,27 @@ define i64 @atomicrmw_min_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
-; RV32IA-NEXT:    bnez a0, .LBB208_7
-; RV32IA-NEXT:  .LBB208_2: # %atomicrmw.start
+; RV32IA-NEXT:    bnez a0, .LBB218_7
+; RV32IA-NEXT:  .LBB218_2: # %atomicrmw.start
 ; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    beq a5, s1, .LBB208_4
+; RV32IA-NEXT:    beq a5, s1, .LBB218_4
 ; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB208_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB218_2 Depth=1
 ; RV32IA-NEXT:    slt a0, s1, a5
-; RV32IA-NEXT:    j .LBB208_5
-; RV32IA-NEXT:  .LBB208_4: # in Loop: Header=BB208_2 Depth=1
+; RV32IA-NEXT:    j .LBB218_5
+; RV32IA-NEXT:  .LBB218_4: # in Loop: Header=BB218_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s2, a4
-; RV32IA-NEXT:  .LBB208_5: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB208_2 Depth=1
+; RV32IA-NEXT:  .LBB218_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB218_2 Depth=1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
-; RV32IA-NEXT:    beqz a0, .LBB208_1
+; RV32IA-NEXT:    beqz a0, .LBB218_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB208_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB218_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
-; RV32IA-NEXT:    j .LBB208_1
-; RV32IA-NEXT:  .LBB208_7: # %atomicrmw.end
+; RV32IA-NEXT:    j .LBB218_1
+; RV32IA-NEXT:  .LBB218_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
 ; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -15559,9 +16195,9 @@ define i64 @atomicrmw_min_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    j .LBB208_2
-; RV64I-NEXT:  .LBB208_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB208_2 Depth=1
+; RV64I-NEXT:    j .LBB218_2
+; RV64I-NEXT:  .LBB218_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB218_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    li a3, 4
@@ -15569,16 +16205,16 @@ define i64 @atomicrmw_min_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
-; RV64I-NEXT:    bnez a0, .LBB208_4
-; RV64I-NEXT:  .LBB208_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB218_4
+; RV64I-NEXT:  .LBB218_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bge s1, a3, .LBB208_1
+; RV64I-NEXT:    bge s1, a3, .LBB218_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB208_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB218_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB208_1
-; RV64I-NEXT:  .LBB208_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB218_1
+; RV64I-NEXT:  .LBB218_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -15607,9 +16243,9 @@ define i64 @atomicrmw_min_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a4, 0(a0)
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
-; RV32I-NEXT:    j .LBB209_2
-; RV32I-NEXT:  .LBB209_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB209_2 Depth=1
+; RV32I-NEXT:    j .LBB219_2
+; RV32I-NEXT:  .LBB219_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB219_2 Depth=1
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
@@ -15619,27 +16255,27 @@ define i64 @atomicrmw_min_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
-; RV32I-NEXT:    bnez a0, .LBB209_7
-; RV32I-NEXT:  .LBB209_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB219_7
+; RV32I-NEXT:  .LBB219_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT:    beq a5, s1, .LBB209_4
+; RV32I-NEXT:    beq a5, s1, .LBB219_4
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB209_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB219_2 Depth=1
 ; RV32I-NEXT:    slt a0, s1, a5
-; RV32I-NEXT:    j .LBB209_5
-; RV32I-NEXT:  .LBB209_4: # in Loop: Header=BB209_2 Depth=1
+; RV32I-NEXT:    j .LBB219_5
+; RV32I-NEXT:  .LBB219_4: # in Loop: Header=BB219_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s2, a4
-; RV32I-NEXT:  .LBB209_5: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB209_2 Depth=1
+; RV32I-NEXT:  .LBB219_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB219_2 Depth=1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
-; RV32I-NEXT:    beqz a0, .LBB209_1
+; RV32I-NEXT:    beqz a0, .LBB219_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB209_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB219_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
-; RV32I-NEXT:    j .LBB209_1
-; RV32I-NEXT:  .LBB209_7: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB219_1
+; RV32I-NEXT:  .LBB219_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -15661,9 +16297,9 @@ define i64 @atomicrmw_min_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a4, 0(a0)
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
-; RV32IA-NEXT:    j .LBB209_2
-; RV32IA-NEXT:  .LBB209_1: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB209_2 Depth=1
+; RV32IA-NEXT:    j .LBB219_2
+; RV32IA-NEXT:  .LBB219_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB219_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
@@ -15673,27 +16309,27 @@ define i64 @atomicrmw_min_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
-; RV32IA-NEXT:    bnez a0, .LBB209_7
-; RV32IA-NEXT:  .LBB209_2: # %atomicrmw.start
+; RV32IA-NEXT:    bnez a0, .LBB219_7
+; RV32IA-NEXT:  .LBB219_2: # %atomicrmw.start
 ; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    beq a5, s1, .LBB209_4
+; RV32IA-NEXT:    beq a5, s1, .LBB219_4
 ; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB209_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB219_2 Depth=1
 ; RV32IA-NEXT:    slt a0, s1, a5
-; RV32IA-NEXT:    j .LBB209_5
-; RV32IA-NEXT:  .LBB209_4: # in Loop: Header=BB209_2 Depth=1
+; RV32IA-NEXT:    j .LBB219_5
+; RV32IA-NEXT:  .LBB219_4: # in Loop: Header=BB219_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s2, a4
-; RV32IA-NEXT:  .LBB209_5: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB209_2 Depth=1
+; RV32IA-NEXT:  .LBB219_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB219_2 Depth=1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
-; RV32IA-NEXT:    beqz a0, .LBB209_1
+; RV32IA-NEXT:    beqz a0, .LBB219_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB209_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB219_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
-; RV32IA-NEXT:    j .LBB209_1
-; RV32IA-NEXT:  .LBB209_7: # %atomicrmw.end
+; RV32IA-NEXT:    j .LBB219_1
+; RV32IA-NEXT:  .LBB219_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
 ; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -15712,9 +16348,9 @@ define i64 @atomicrmw_min_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    j .LBB209_2
-; RV64I-NEXT:  .LBB209_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB209_2 Depth=1
+; RV64I-NEXT:    j .LBB219_2
+; RV64I-NEXT:  .LBB219_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB219_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    li a3, 5
@@ -15722,16 +16358,16 @@ define i64 @atomicrmw_min_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
-; RV64I-NEXT:    bnez a0, .LBB209_4
-; RV64I-NEXT:  .LBB209_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB219_4
+; RV64I-NEXT:  .LBB219_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bge s1, a3, .LBB209_1
+; RV64I-NEXT:    bge s1, a3, .LBB219_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB209_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB219_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB209_1
-; RV64I-NEXT:  .LBB209_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB219_1
+; RV64I-NEXT:  .LBB219_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -15760,9 +16396,9 @@ define i64 @atomicrmw_umax_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a4, 0(a0)
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
-; RV32I-NEXT:    j .LBB210_2
-; RV32I-NEXT:  .LBB210_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB210_2 Depth=1
+; RV32I-NEXT:    j .LBB220_2
+; RV32I-NEXT:  .LBB220_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB220_2 Depth=1
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
@@ -15772,27 +16408,27 @@ define i64 @atomicrmw_umax_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
-; RV32I-NEXT:    bnez a0, .LBB210_7
-; RV32I-NEXT:  .LBB210_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB220_7
+; RV32I-NEXT:  .LBB220_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT:    beq a5, s1, .LBB210_4
+; RV32I-NEXT:    beq a5, s1, .LBB220_4
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB210_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB220_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s1, a5
-; RV32I-NEXT:    j .LBB210_5
-; RV32I-NEXT:  .LBB210_4: # in Loop: Header=BB210_2 Depth=1
+; RV32I-NEXT:    j .LBB220_5
+; RV32I-NEXT:  .LBB220_4: # in Loop: Header=BB220_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s2, a4
-; RV32I-NEXT:  .LBB210_5: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB210_2 Depth=1
+; RV32I-NEXT:  .LBB220_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB220_2 Depth=1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
-; RV32I-NEXT:    bnez a0, .LBB210_1
+; RV32I-NEXT:    bnez a0, .LBB220_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB210_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB220_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
-; RV32I-NEXT:    j .LBB210_1
-; RV32I-NEXT:  .LBB210_7: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB220_1
+; RV32I-NEXT:  .LBB220_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -15814,9 +16450,9 @@ define i64 @atomicrmw_umax_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a4, 0(a0)
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
-; RV32IA-NEXT:    j .LBB210_2
-; RV32IA-NEXT:  .LBB210_1: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB210_2 Depth=1
+; RV32IA-NEXT:    j .LBB220_2
+; RV32IA-NEXT:  .LBB220_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB220_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
@@ -15826,27 +16462,27 @@ define i64 @atomicrmw_umax_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
-; RV32IA-NEXT:    bnez a0, .LBB210_7
-; RV32IA-NEXT:  .LBB210_2: # %atomicrmw.start
+; RV32IA-NEXT:    bnez a0, .LBB220_7
+; RV32IA-NEXT:  .LBB220_2: # %atomicrmw.start
 ; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    beq a5, s1, .LBB210_4
+; RV32IA-NEXT:    beq a5, s1, .LBB220_4
 ; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB210_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB220_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s1, a5
-; RV32IA-NEXT:    j .LBB210_5
-; RV32IA-NEXT:  .LBB210_4: # in Loop: Header=BB210_2 Depth=1
+; RV32IA-NEXT:    j .LBB220_5
+; RV32IA-NEXT:  .LBB220_4: # in Loop: Header=BB220_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s2, a4
-; RV32IA-NEXT:  .LBB210_5: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB210_2 Depth=1
+; RV32IA-NEXT:  .LBB220_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB220_2 Depth=1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
-; RV32IA-NEXT:    bnez a0, .LBB210_1
+; RV32IA-NEXT:    bnez a0, .LBB220_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB210_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB220_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
-; RV32IA-NEXT:    j .LBB210_1
-; RV32IA-NEXT:  .LBB210_7: # %atomicrmw.end
+; RV32IA-NEXT:    j .LBB220_1
+; RV32IA-NEXT:  .LBB220_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
 ; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -15865,9 +16501,9 @@ define i64 @atomicrmw_umax_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    j .LBB210_2
-; RV64I-NEXT:  .LBB210_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB210_2 Depth=1
+; RV64I-NEXT:    j .LBB220_2
+; RV64I-NEXT:  .LBB220_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB220_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    mv a0, s0
@@ -15875,16 +16511,16 @@ define i64 @atomicrmw_umax_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
-; RV64I-NEXT:    bnez a0, .LBB210_4
-; RV64I-NEXT:  .LBB210_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB220_4
+; RV64I-NEXT:  .LBB220_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bltu s1, a3, .LBB210_1
+; RV64I-NEXT:    bltu s1, a3, .LBB220_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB210_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB220_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB210_1
-; RV64I-NEXT:  .LBB210_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB220_1
+; RV64I-NEXT:  .LBB220_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -15913,9 +16549,9 @@ define i64 @atomicrmw_umax_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a4, 0(a0)
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
-; RV32I-NEXT:    j .LBB211_2
-; RV32I-NEXT:  .LBB211_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB211_2 Depth=1
+; RV32I-NEXT:    j .LBB221_2
+; RV32I-NEXT:  .LBB221_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB221_2 Depth=1
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
@@ -15925,27 +16561,27 @@ define i64 @atomicrmw_umax_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
-; RV32I-NEXT:    bnez a0, .LBB211_7
-; RV32I-NEXT:  .LBB211_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB221_7
+; RV32I-NEXT:  .LBB221_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT:    beq a5, s1, .LBB211_4
+; RV32I-NEXT:    beq a5, s1, .LBB221_4
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB211_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB221_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s1, a5
-; RV32I-NEXT:    j .LBB211_5
-; RV32I-NEXT:  .LBB211_4: # in Loop: Header=BB211_2 Depth=1
+; RV32I-NEXT:    j .LBB221_5
+; RV32I-NEXT:  .LBB221_4: # in Loop: Header=BB221_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s2, a4
-; RV32I-NEXT:  .LBB211_5: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB211_2 Depth=1
+; RV32I-NEXT:  .LBB221_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB221_2 Depth=1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
-; RV32I-NEXT:    bnez a0, .LBB211_1
+; RV32I-NEXT:    bnez a0, .LBB221_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB211_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB221_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
-; RV32I-NEXT:    j .LBB211_1
-; RV32I-NEXT:  .LBB211_7: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB221_1
+; RV32I-NEXT:  .LBB221_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -15967,9 +16603,9 @@ define i64 @atomicrmw_umax_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a4, 0(a0)
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
-; RV32IA-NEXT:    j .LBB211_2
-; RV32IA-NEXT:  .LBB211_1: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB211_2 Depth=1
+; RV32IA-NEXT:    j .LBB221_2
+; RV32IA-NEXT:  .LBB221_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB221_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
@@ -15979,27 +16615,27 @@ define i64 @atomicrmw_umax_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
-; RV32IA-NEXT:    bnez a0, .LBB211_7
-; RV32IA-NEXT:  .LBB211_2: # %atomicrmw.start
+; RV32IA-NEXT:    bnez a0, .LBB221_7
+; RV32IA-NEXT:  .LBB221_2: # %atomicrmw.start
 ; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    beq a5, s1, .LBB211_4
+; RV32IA-NEXT:    beq a5, s1, .LBB221_4
 ; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB211_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB221_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s1, a5
-; RV32IA-NEXT:    j .LBB211_5
-; RV32IA-NEXT:  .LBB211_4: # in Loop: Header=BB211_2 Depth=1
+; RV32IA-NEXT:    j .LBB221_5
+; RV32IA-NEXT:  .LBB221_4: # in Loop: Header=BB221_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s2, a4
-; RV32IA-NEXT:  .LBB211_5: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB211_2 Depth=1
+; RV32IA-NEXT:  .LBB221_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB221_2 Depth=1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
-; RV32IA-NEXT:    bnez a0, .LBB211_1
+; RV32IA-NEXT:    bnez a0, .LBB221_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB211_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB221_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
-; RV32IA-NEXT:    j .LBB211_1
-; RV32IA-NEXT:  .LBB211_7: # %atomicrmw.end
+; RV32IA-NEXT:    j .LBB221_1
+; RV32IA-NEXT:  .LBB221_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
 ; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -16018,9 +16654,9 @@ define i64 @atomicrmw_umax_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    j .LBB211_2
-; RV64I-NEXT:  .LBB211_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB211_2 Depth=1
+; RV64I-NEXT:    j .LBB221_2
+; RV64I-NEXT:  .LBB221_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB221_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    li a3, 2
@@ -16028,16 +16664,16 @@ define i64 @atomicrmw_umax_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
-; RV64I-NEXT:    bnez a0, .LBB211_4
-; RV64I-NEXT:  .LBB211_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB221_4
+; RV64I-NEXT:  .LBB221_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bltu s1, a3, .LBB211_1
+; RV64I-NEXT:    bltu s1, a3, .LBB221_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB211_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB221_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB211_1
-; RV64I-NEXT:  .LBB211_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB221_1
+; RV64I-NEXT:  .LBB221_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -16066,9 +16702,9 @@ define i64 @atomicrmw_umax_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a4, 0(a0)
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
-; RV32I-NEXT:    j .LBB212_2
-; RV32I-NEXT:  .LBB212_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB212_2 Depth=1
+; RV32I-NEXT:    j .LBB222_2
+; RV32I-NEXT:  .LBB222_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB222_2 Depth=1
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
@@ -16078,27 +16714,27 @@ define i64 @atomicrmw_umax_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
-; RV32I-NEXT:    bnez a0, .LBB212_7
-; RV32I-NEXT:  .LBB212_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB222_7
+; RV32I-NEXT:  .LBB222_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT:    beq a5, s1, .LBB212_4
+; RV32I-NEXT:    beq a5, s1, .LBB222_4
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB212_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB222_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s1, a5
-; RV32I-NEXT:    j .LBB212_5
-; RV32I-NEXT:  .LBB212_4: # in Loop: Header=BB212_2 Depth=1
+; RV32I-NEXT:    j .LBB222_5
+; RV32I-NEXT:  .LBB222_4: # in Loop: Header=BB222_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s2, a4
-; RV32I-NEXT:  .LBB212_5: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB212_2 Depth=1
+; RV32I-NEXT:  .LBB222_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB222_2 Depth=1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
-; RV32I-NEXT:    bnez a0, .LBB212_1
+; RV32I-NEXT:    bnez a0, .LBB222_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB212_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB222_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
-; RV32I-NEXT:    j .LBB212_1
-; RV32I-NEXT:  .LBB212_7: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB222_1
+; RV32I-NEXT:  .LBB222_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -16120,9 +16756,9 @@ define i64 @atomicrmw_umax_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a4, 0(a0)
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
-; RV32IA-NEXT:    j .LBB212_2
-; RV32IA-NEXT:  .LBB212_1: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB212_2 Depth=1
+; RV32IA-NEXT:    j .LBB222_2
+; RV32IA-NEXT:  .LBB222_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB222_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
@@ -16132,27 +16768,27 @@ define i64 @atomicrmw_umax_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
-; RV32IA-NEXT:    bnez a0, .LBB212_7
-; RV32IA-NEXT:  .LBB212_2: # %atomicrmw.start
+; RV32IA-NEXT:    bnez a0, .LBB222_7
+; RV32IA-NEXT:  .LBB222_2: # %atomicrmw.start
 ; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    beq a5, s1, .LBB212_4
+; RV32IA-NEXT:    beq a5, s1, .LBB222_4
 ; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB212_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB222_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s1, a5
-; RV32IA-NEXT:    j .LBB212_5
-; RV32IA-NEXT:  .LBB212_4: # in Loop: Header=BB212_2 Depth=1
+; RV32IA-NEXT:    j .LBB222_5
+; RV32IA-NEXT:  .LBB222_4: # in Loop: Header=BB222_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s2, a4
-; RV32IA-NEXT:  .LBB212_5: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB212_2 Depth=1
+; RV32IA-NEXT:  .LBB222_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB222_2 Depth=1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
-; RV32IA-NEXT:    bnez a0, .LBB212_1
+; RV32IA-NEXT:    bnez a0, .LBB222_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB212_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB222_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
-; RV32IA-NEXT:    j .LBB212_1
-; RV32IA-NEXT:  .LBB212_7: # %atomicrmw.end
+; RV32IA-NEXT:    j .LBB222_1
+; RV32IA-NEXT:  .LBB222_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
 ; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -16171,9 +16807,9 @@ define i64 @atomicrmw_umax_i64_release(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    j .LBB212_2
-; RV64I-NEXT:  .LBB212_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB212_2 Depth=1
+; RV64I-NEXT:    j .LBB222_2
+; RV64I-NEXT:  .LBB222_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB222_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    li a3, 3
@@ -16181,16 +16817,16 @@ define i64 @atomicrmw_umax_i64_release(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
-; RV64I-NEXT:    bnez a0, .LBB212_4
-; RV64I-NEXT:  .LBB212_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB222_4
+; RV64I-NEXT:  .LBB222_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bltu s1, a3, .LBB212_1
+; RV64I-NEXT:    bltu s1, a3, .LBB222_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB212_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB222_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB212_1
-; RV64I-NEXT:  .LBB212_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB222_1
+; RV64I-NEXT:  .LBB222_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -16219,9 +16855,9 @@ define i64 @atomicrmw_umax_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a4, 0(a0)
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
-; RV32I-NEXT:    j .LBB213_2
-; RV32I-NEXT:  .LBB213_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB213_2 Depth=1
+; RV32I-NEXT:    j .LBB223_2
+; RV32I-NEXT:  .LBB223_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB223_2 Depth=1
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
@@ -16231,27 +16867,27 @@ define i64 @atomicrmw_umax_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
-; RV32I-NEXT:    bnez a0, .LBB213_7
-; RV32I-NEXT:  .LBB213_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB223_7
+; RV32I-NEXT:  .LBB223_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT:    beq a5, s1, .LBB213_4
+; RV32I-NEXT:    beq a5, s1, .LBB223_4
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB213_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB223_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s1, a5
-; RV32I-NEXT:    j .LBB213_5
-; RV32I-NEXT:  .LBB213_4: # in Loop: Header=BB213_2 Depth=1
+; RV32I-NEXT:    j .LBB223_5
+; RV32I-NEXT:  .LBB223_4: # in Loop: Header=BB223_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s2, a4
-; RV32I-NEXT:  .LBB213_5: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB213_2 Depth=1
+; RV32I-NEXT:  .LBB223_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB223_2 Depth=1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
-; RV32I-NEXT:    bnez a0, .LBB213_1
+; RV32I-NEXT:    bnez a0, .LBB223_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB213_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB223_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
-; RV32I-NEXT:    j .LBB213_1
-; RV32I-NEXT:  .LBB213_7: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB223_1
+; RV32I-NEXT:  .LBB223_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -16273,9 +16909,9 @@ define i64 @atomicrmw_umax_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a4, 0(a0)
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
-; RV32IA-NEXT:    j .LBB213_2
-; RV32IA-NEXT:  .LBB213_1: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB213_2 Depth=1
+; RV32IA-NEXT:    j .LBB223_2
+; RV32IA-NEXT:  .LBB223_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB223_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
@@ -16285,27 +16921,27 @@ define i64 @atomicrmw_umax_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
-; RV32IA-NEXT:    bnez a0, .LBB213_7
-; RV32IA-NEXT:  .LBB213_2: # %atomicrmw.start
+; RV32IA-NEXT:    bnez a0, .LBB223_7
+; RV32IA-NEXT:  .LBB223_2: # %atomicrmw.start
 ; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    beq a5, s1, .LBB213_4
+; RV32IA-NEXT:    beq a5, s1, .LBB223_4
 ; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB213_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB223_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s1, a5
-; RV32IA-NEXT:    j .LBB213_5
-; RV32IA-NEXT:  .LBB213_4: # in Loop: Header=BB213_2 Depth=1
+; RV32IA-NEXT:    j .LBB223_5
+; RV32IA-NEXT:  .LBB223_4: # in Loop: Header=BB223_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s2, a4
-; RV32IA-NEXT:  .LBB213_5: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB213_2 Depth=1
+; RV32IA-NEXT:  .LBB223_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB223_2 Depth=1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
-; RV32IA-NEXT:    bnez a0, .LBB213_1
+; RV32IA-NEXT:    bnez a0, .LBB223_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB213_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB223_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
-; RV32IA-NEXT:    j .LBB213_1
-; RV32IA-NEXT:  .LBB213_7: # %atomicrmw.end
+; RV32IA-NEXT:    j .LBB223_1
+; RV32IA-NEXT:  .LBB223_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
 ; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -16324,9 +16960,9 @@ define i64 @atomicrmw_umax_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    j .LBB213_2
-; RV64I-NEXT:  .LBB213_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB213_2 Depth=1
+; RV64I-NEXT:    j .LBB223_2
+; RV64I-NEXT:  .LBB223_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB223_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    li a3, 4
@@ -16334,16 +16970,16 @@ define i64 @atomicrmw_umax_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
-; RV64I-NEXT:    bnez a0, .LBB213_4
-; RV64I-NEXT:  .LBB213_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB223_4
+; RV64I-NEXT:  .LBB223_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bltu s1, a3, .LBB213_1
+; RV64I-NEXT:    bltu s1, a3, .LBB223_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB213_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB223_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB213_1
-; RV64I-NEXT:  .LBB213_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB223_1
+; RV64I-NEXT:  .LBB223_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -16372,9 +17008,9 @@ define i64 @atomicrmw_umax_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a4, 0(a0)
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
-; RV32I-NEXT:    j .LBB214_2
-; RV32I-NEXT:  .LBB214_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB214_2 Depth=1
+; RV32I-NEXT:    j .LBB224_2
+; RV32I-NEXT:  .LBB224_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB224_2 Depth=1
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
@@ -16384,27 +17020,27 @@ define i64 @atomicrmw_umax_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
-; RV32I-NEXT:    bnez a0, .LBB214_7
-; RV32I-NEXT:  .LBB214_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB224_7
+; RV32I-NEXT:  .LBB224_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT:    beq a5, s1, .LBB214_4
+; RV32I-NEXT:    beq a5, s1, .LBB224_4
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB214_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB224_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s1, a5
-; RV32I-NEXT:    j .LBB214_5
-; RV32I-NEXT:  .LBB214_4: # in Loop: Header=BB214_2 Depth=1
+; RV32I-NEXT:    j .LBB224_5
+; RV32I-NEXT:  .LBB224_4: # in Loop: Header=BB224_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s2, a4
-; RV32I-NEXT:  .LBB214_5: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB214_2 Depth=1
+; RV32I-NEXT:  .LBB224_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB224_2 Depth=1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
-; RV32I-NEXT:    bnez a0, .LBB214_1
+; RV32I-NEXT:    bnez a0, .LBB224_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB214_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB224_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
-; RV32I-NEXT:    j .LBB214_1
-; RV32I-NEXT:  .LBB214_7: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB224_1
+; RV32I-NEXT:  .LBB224_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -16426,9 +17062,9 @@ define i64 @atomicrmw_umax_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a4, 0(a0)
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
-; RV32IA-NEXT:    j .LBB214_2
-; RV32IA-NEXT:  .LBB214_1: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB214_2 Depth=1
+; RV32IA-NEXT:    j .LBB224_2
+; RV32IA-NEXT:  .LBB224_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB224_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
@@ -16438,27 +17074,27 @@ define i64 @atomicrmw_umax_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
-; RV32IA-NEXT:    bnez a0, .LBB214_7
-; RV32IA-NEXT:  .LBB214_2: # %atomicrmw.start
+; RV32IA-NEXT:    bnez a0, .LBB224_7
+; RV32IA-NEXT:  .LBB224_2: # %atomicrmw.start
 ; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    beq a5, s1, .LBB214_4
+; RV32IA-NEXT:    beq a5, s1, .LBB224_4
 ; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB214_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB224_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s1, a5
-; RV32IA-NEXT:    j .LBB214_5
-; RV32IA-NEXT:  .LBB214_4: # in Loop: Header=BB214_2 Depth=1
+; RV32IA-NEXT:    j .LBB224_5
+; RV32IA-NEXT:  .LBB224_4: # in Loop: Header=BB224_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s2, a4
-; RV32IA-NEXT:  .LBB214_5: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB214_2 Depth=1
+; RV32IA-NEXT:  .LBB224_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB224_2 Depth=1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
-; RV32IA-NEXT:    bnez a0, .LBB214_1
+; RV32IA-NEXT:    bnez a0, .LBB224_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB214_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB224_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
-; RV32IA-NEXT:    j .LBB214_1
-; RV32IA-NEXT:  .LBB214_7: # %atomicrmw.end
+; RV32IA-NEXT:    j .LBB224_1
+; RV32IA-NEXT:  .LBB224_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
 ; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -16477,9 +17113,9 @@ define i64 @atomicrmw_umax_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    j .LBB214_2
-; RV64I-NEXT:  .LBB214_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB214_2 Depth=1
+; RV64I-NEXT:    j .LBB224_2
+; RV64I-NEXT:  .LBB224_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB224_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    li a3, 5
@@ -16487,16 +17123,16 @@ define i64 @atomicrmw_umax_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
-; RV64I-NEXT:    bnez a0, .LBB214_4
-; RV64I-NEXT:  .LBB214_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB224_4
+; RV64I-NEXT:  .LBB224_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bltu s1, a3, .LBB214_1
+; RV64I-NEXT:    bltu s1, a3, .LBB224_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB214_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB224_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB214_1
-; RV64I-NEXT:  .LBB214_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB224_1
+; RV64I-NEXT:  .LBB224_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -16525,9 +17161,9 @@ define i64 @atomicrmw_umin_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a4, 0(a0)
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
-; RV32I-NEXT:    j .LBB215_2
-; RV32I-NEXT:  .LBB215_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB215_2 Depth=1
+; RV32I-NEXT:    j .LBB225_2
+; RV32I-NEXT:  .LBB225_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB225_2 Depth=1
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
@@ -16537,27 +17173,27 @@ define i64 @atomicrmw_umin_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
-; RV32I-NEXT:    bnez a0, .LBB215_7
-; RV32I-NEXT:  .LBB215_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB225_7
+; RV32I-NEXT:  .LBB225_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT:    beq a5, s1, .LBB215_4
+; RV32I-NEXT:    beq a5, s1, .LBB225_4
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB215_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB225_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s1, a5
-; RV32I-NEXT:    j .LBB215_5
-; RV32I-NEXT:  .LBB215_4: # in Loop: Header=BB215_2 Depth=1
+; RV32I-NEXT:    j .LBB225_5
+; RV32I-NEXT:  .LBB225_4: # in Loop: Header=BB225_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s2, a4
-; RV32I-NEXT:  .LBB215_5: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB215_2 Depth=1
+; RV32I-NEXT:  .LBB225_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB225_2 Depth=1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
-; RV32I-NEXT:    beqz a0, .LBB215_1
+; RV32I-NEXT:    beqz a0, .LBB225_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB215_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB225_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
-; RV32I-NEXT:    j .LBB215_1
-; RV32I-NEXT:  .LBB215_7: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB225_1
+; RV32I-NEXT:  .LBB225_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -16579,9 +17215,9 @@ define i64 @atomicrmw_umin_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a4, 0(a0)
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
-; RV32IA-NEXT:    j .LBB215_2
-; RV32IA-NEXT:  .LBB215_1: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB215_2 Depth=1
+; RV32IA-NEXT:    j .LBB225_2
+; RV32IA-NEXT:  .LBB225_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB225_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
@@ -16591,27 +17227,27 @@ define i64 @atomicrmw_umin_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
-; RV32IA-NEXT:    bnez a0, .LBB215_7
-; RV32IA-NEXT:  .LBB215_2: # %atomicrmw.start
+; RV32IA-NEXT:    bnez a0, .LBB225_7
+; RV32IA-NEXT:  .LBB225_2: # %atomicrmw.start
 ; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    beq a5, s1, .LBB215_4
+; RV32IA-NEXT:    beq a5, s1, .LBB225_4
 ; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB215_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB225_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s1, a5
-; RV32IA-NEXT:    j .LBB215_5
-; RV32IA-NEXT:  .LBB215_4: # in Loop: Header=BB215_2 Depth=1
+; RV32IA-NEXT:    j .LBB225_5
+; RV32IA-NEXT:  .LBB225_4: # in Loop: Header=BB225_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s2, a4
-; RV32IA-NEXT:  .LBB215_5: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB215_2 Depth=1
+; RV32IA-NEXT:  .LBB225_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB225_2 Depth=1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
-; RV32IA-NEXT:    beqz a0, .LBB215_1
+; RV32IA-NEXT:    beqz a0, .LBB225_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB215_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB225_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
-; RV32IA-NEXT:    j .LBB215_1
-; RV32IA-NEXT:  .LBB215_7: # %atomicrmw.end
+; RV32IA-NEXT:    j .LBB225_1
+; RV32IA-NEXT:  .LBB225_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
 ; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -16630,9 +17266,9 @@ define i64 @atomicrmw_umin_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    j .LBB215_2
-; RV64I-NEXT:  .LBB215_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB215_2 Depth=1
+; RV64I-NEXT:    j .LBB225_2
+; RV64I-NEXT:  .LBB225_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB225_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    mv a0, s0
@@ -16640,16 +17276,16 @@ define i64 @atomicrmw_umin_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
-; RV64I-NEXT:    bnez a0, .LBB215_4
-; RV64I-NEXT:  .LBB215_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB225_4
+; RV64I-NEXT:  .LBB225_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bgeu s1, a3, .LBB215_1
+; RV64I-NEXT:    bgeu s1, a3, .LBB225_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB215_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB225_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB215_1
-; RV64I-NEXT:  .LBB215_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB225_1
+; RV64I-NEXT:  .LBB225_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -16678,9 +17314,9 @@ define i64 @atomicrmw_umin_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a4, 0(a0)
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
-; RV32I-NEXT:    j .LBB216_2
-; RV32I-NEXT:  .LBB216_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB216_2 Depth=1
+; RV32I-NEXT:    j .LBB226_2
+; RV32I-NEXT:  .LBB226_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB226_2 Depth=1
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
@@ -16690,27 +17326,27 @@ define i64 @atomicrmw_umin_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
-; RV32I-NEXT:    bnez a0, .LBB216_7
-; RV32I-NEXT:  .LBB216_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB226_7
+; RV32I-NEXT:  .LBB226_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT:    beq a5, s1, .LBB216_4
+; RV32I-NEXT:    beq a5, s1, .LBB226_4
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB216_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB226_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s1, a5
-; RV32I-NEXT:    j .LBB216_5
-; RV32I-NEXT:  .LBB216_4: # in Loop: Header=BB216_2 Depth=1
+; RV32I-NEXT:    j .LBB226_5
+; RV32I-NEXT:  .LBB226_4: # in Loop: Header=BB226_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s2, a4
-; RV32I-NEXT:  .LBB216_5: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB216_2 Depth=1
+; RV32I-NEXT:  .LBB226_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB226_2 Depth=1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
-; RV32I-NEXT:    beqz a0, .LBB216_1
+; RV32I-NEXT:    beqz a0, .LBB226_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB216_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB226_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
-; RV32I-NEXT:    j .LBB216_1
-; RV32I-NEXT:  .LBB216_7: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB226_1
+; RV32I-NEXT:  .LBB226_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -16732,9 +17368,9 @@ define i64 @atomicrmw_umin_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a4, 0(a0)
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
-; RV32IA-NEXT:    j .LBB216_2
-; RV32IA-NEXT:  .LBB216_1: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB216_2 Depth=1
+; RV32IA-NEXT:    j .LBB226_2
+; RV32IA-NEXT:  .LBB226_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB226_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
@@ -16744,27 +17380,27 @@ define i64 @atomicrmw_umin_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
-; RV32IA-NEXT:    bnez a0, .LBB216_7
-; RV32IA-NEXT:  .LBB216_2: # %atomicrmw.start
+; RV32IA-NEXT:    bnez a0, .LBB226_7
+; RV32IA-NEXT:  .LBB226_2: # %atomicrmw.start
 ; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    beq a5, s1, .LBB216_4
+; RV32IA-NEXT:    beq a5, s1, .LBB226_4
 ; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB216_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB226_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s1, a5
-; RV32IA-NEXT:    j .LBB216_5
-; RV32IA-NEXT:  .LBB216_4: # in Loop: Header=BB216_2 Depth=1
+; RV32IA-NEXT:    j .LBB226_5
+; RV32IA-NEXT:  .LBB226_4: # in Loop: Header=BB226_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s2, a4
-; RV32IA-NEXT:  .LBB216_5: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB216_2 Depth=1
+; RV32IA-NEXT:  .LBB226_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB226_2 Depth=1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
-; RV32IA-NEXT:    beqz a0, .LBB216_1
+; RV32IA-NEXT:    beqz a0, .LBB226_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB216_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB226_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
-; RV32IA-NEXT:    j .LBB216_1
-; RV32IA-NEXT:  .LBB216_7: # %atomicrmw.end
+; RV32IA-NEXT:    j .LBB226_1
+; RV32IA-NEXT:  .LBB226_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
 ; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -16783,9 +17419,9 @@ define i64 @atomicrmw_umin_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    j .LBB216_2
-; RV64I-NEXT:  .LBB216_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB216_2 Depth=1
+; RV64I-NEXT:    j .LBB226_2
+; RV64I-NEXT:  .LBB226_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB226_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    li a3, 2
@@ -16793,16 +17429,16 @@ define i64 @atomicrmw_umin_i64_acquire(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
-; RV64I-NEXT:    bnez a0, .LBB216_4
-; RV64I-NEXT:  .LBB216_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB226_4
+; RV64I-NEXT:  .LBB226_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bgeu s1, a3, .LBB216_1
+; RV64I-NEXT:    bgeu s1, a3, .LBB226_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB216_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB226_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB216_1
-; RV64I-NEXT:  .LBB216_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB226_1
+; RV64I-NEXT:  .LBB226_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -16831,9 +17467,9 @@ define i64 @atomicrmw_umin_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a4, 0(a0)
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
-; RV32I-NEXT:    j .LBB217_2
-; RV32I-NEXT:  .LBB217_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB217_2 Depth=1
+; RV32I-NEXT:    j .LBB227_2
+; RV32I-NEXT:  .LBB227_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB227_2 Depth=1
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
@@ -16843,27 +17479,27 @@ define i64 @atomicrmw_umin_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
-; RV32I-NEXT:    bnez a0, .LBB217_7
-; RV32I-NEXT:  .LBB217_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB227_7
+; RV32I-NEXT:  .LBB227_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT:    beq a5, s1, .LBB217_4
+; RV32I-NEXT:    beq a5, s1, .LBB227_4
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB217_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB227_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s1, a5
-; RV32I-NEXT:    j .LBB217_5
-; RV32I-NEXT:  .LBB217_4: # in Loop: Header=BB217_2 Depth=1
+; RV32I-NEXT:    j .LBB227_5
+; RV32I-NEXT:  .LBB227_4: # in Loop: Header=BB227_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s2, a4
-; RV32I-NEXT:  .LBB217_5: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB217_2 Depth=1
+; RV32I-NEXT:  .LBB227_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB227_2 Depth=1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
-; RV32I-NEXT:    beqz a0, .LBB217_1
+; RV32I-NEXT:    beqz a0, .LBB227_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB217_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB227_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
-; RV32I-NEXT:    j .LBB217_1
-; RV32I-NEXT:  .LBB217_7: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB227_1
+; RV32I-NEXT:  .LBB227_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -16885,9 +17521,9 @@ define i64 @atomicrmw_umin_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a4, 0(a0)
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
-; RV32IA-NEXT:    j .LBB217_2
-; RV32IA-NEXT:  .LBB217_1: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB217_2 Depth=1
+; RV32IA-NEXT:    j .LBB227_2
+; RV32IA-NEXT:  .LBB227_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB227_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
@@ -16897,27 +17533,27 @@ define i64 @atomicrmw_umin_i64_release(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
-; RV32IA-NEXT:    bnez a0, .LBB217_7
-; RV32IA-NEXT:  .LBB217_2: # %atomicrmw.start
+; RV32IA-NEXT:    bnez a0, .LBB227_7
+; RV32IA-NEXT:  .LBB227_2: # %atomicrmw.start
 ; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    beq a5, s1, .LBB217_4
+; RV32IA-NEXT:    beq a5, s1, .LBB227_4
 ; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB217_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB227_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s1, a5
-; RV32IA-NEXT:    j .LBB217_5
-; RV32IA-NEXT:  .LBB217_4: # in Loop: Header=BB217_2 Depth=1
+; RV32IA-NEXT:    j .LBB227_5
+; RV32IA-NEXT:  .LBB227_4: # in Loop: Header=BB227_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s2, a4
-; RV32IA-NEXT:  .LBB217_5: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB217_2 Depth=1
+; RV32IA-NEXT:  .LBB227_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB227_2 Depth=1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
-; RV32IA-NEXT:    beqz a0, .LBB217_1
+; RV32IA-NEXT:    beqz a0, .LBB227_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB217_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB227_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
-; RV32IA-NEXT:    j .LBB217_1
-; RV32IA-NEXT:  .LBB217_7: # %atomicrmw.end
+; RV32IA-NEXT:    j .LBB227_1
+; RV32IA-NEXT:  .LBB227_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
 ; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -16936,9 +17572,9 @@ define i64 @atomicrmw_umin_i64_release(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    j .LBB217_2
-; RV64I-NEXT:  .LBB217_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB217_2 Depth=1
+; RV64I-NEXT:    j .LBB227_2
+; RV64I-NEXT:  .LBB227_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB227_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    li a3, 3
@@ -16946,16 +17582,16 @@ define i64 @atomicrmw_umin_i64_release(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
-; RV64I-NEXT:    bnez a0, .LBB217_4
-; RV64I-NEXT:  .LBB217_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB227_4
+; RV64I-NEXT:  .LBB227_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bgeu s1, a3, .LBB217_1
+; RV64I-NEXT:    bgeu s1, a3, .LBB227_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB217_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB227_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB217_1
-; RV64I-NEXT:  .LBB217_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB227_1
+; RV64I-NEXT:  .LBB227_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -16984,9 +17620,9 @@ define i64 @atomicrmw_umin_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a4, 0(a0)
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
-; RV32I-NEXT:    j .LBB218_2
-; RV32I-NEXT:  .LBB218_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB218_2 Depth=1
+; RV32I-NEXT:    j .LBB228_2
+; RV32I-NEXT:  .LBB228_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB228_2 Depth=1
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
@@ -16996,27 +17632,27 @@ define i64 @atomicrmw_umin_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
-; RV32I-NEXT:    bnez a0, .LBB218_7
-; RV32I-NEXT:  .LBB218_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB228_7
+; RV32I-NEXT:  .LBB228_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT:    beq a5, s1, .LBB218_4
+; RV32I-NEXT:    beq a5, s1, .LBB228_4
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB218_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB228_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s1, a5
-; RV32I-NEXT:    j .LBB218_5
-; RV32I-NEXT:  .LBB218_4: # in Loop: Header=BB218_2 Depth=1
+; RV32I-NEXT:    j .LBB228_5
+; RV32I-NEXT:  .LBB228_4: # in Loop: Header=BB228_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s2, a4
-; RV32I-NEXT:  .LBB218_5: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB218_2 Depth=1
+; RV32I-NEXT:  .LBB228_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB228_2 Depth=1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
-; RV32I-NEXT:    beqz a0, .LBB218_1
+; RV32I-NEXT:    beqz a0, .LBB228_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB218_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB228_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
-; RV32I-NEXT:    j .LBB218_1
-; RV32I-NEXT:  .LBB218_7: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB228_1
+; RV32I-NEXT:  .LBB228_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -17038,9 +17674,9 @@ define i64 @atomicrmw_umin_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a4, 0(a0)
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
-; RV32IA-NEXT:    j .LBB218_2
-; RV32IA-NEXT:  .LBB218_1: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB218_2 Depth=1
+; RV32IA-NEXT:    j .LBB228_2
+; RV32IA-NEXT:  .LBB228_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB228_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
@@ -17050,27 +17686,27 @@ define i64 @atomicrmw_umin_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
-; RV32IA-NEXT:    bnez a0, .LBB218_7
-; RV32IA-NEXT:  .LBB218_2: # %atomicrmw.start
+; RV32IA-NEXT:    bnez a0, .LBB228_7
+; RV32IA-NEXT:  .LBB228_2: # %atomicrmw.start
 ; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    beq a5, s1, .LBB218_4
+; RV32IA-NEXT:    beq a5, s1, .LBB228_4
 ; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB218_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB228_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s1, a5
-; RV32IA-NEXT:    j .LBB218_5
-; RV32IA-NEXT:  .LBB218_4: # in Loop: Header=BB218_2 Depth=1
+; RV32IA-NEXT:    j .LBB228_5
+; RV32IA-NEXT:  .LBB228_4: # in Loop: Header=BB228_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s2, a4
-; RV32IA-NEXT:  .LBB218_5: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB218_2 Depth=1
+; RV32IA-NEXT:  .LBB228_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB228_2 Depth=1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
-; RV32IA-NEXT:    beqz a0, .LBB218_1
+; RV32IA-NEXT:    beqz a0, .LBB228_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB218_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB228_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
-; RV32IA-NEXT:    j .LBB218_1
-; RV32IA-NEXT:  .LBB218_7: # %atomicrmw.end
+; RV32IA-NEXT:    j .LBB228_1
+; RV32IA-NEXT:  .LBB228_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
 ; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -17089,9 +17725,9 @@ define i64 @atomicrmw_umin_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    j .LBB218_2
-; RV64I-NEXT:  .LBB218_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB218_2 Depth=1
+; RV64I-NEXT:    j .LBB228_2
+; RV64I-NEXT:  .LBB228_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB228_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    li a3, 4
@@ -17099,16 +17735,16 @@ define i64 @atomicrmw_umin_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
-; RV64I-NEXT:    bnez a0, .LBB218_4
-; RV64I-NEXT:  .LBB218_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB228_4
+; RV64I-NEXT:  .LBB228_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bgeu s1, a3, .LBB218_1
+; RV64I-NEXT:    bgeu s1, a3, .LBB228_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB218_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB228_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB218_1
-; RV64I-NEXT:  .LBB218_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB228_1
+; RV64I-NEXT:  .LBB228_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -17137,9 +17773,9 @@ define i64 @atomicrmw_umin_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a4, 0(a0)
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
-; RV32I-NEXT:    j .LBB219_2
-; RV32I-NEXT:  .LBB219_1: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB219_2 Depth=1
+; RV32I-NEXT:    j .LBB229_2
+; RV32I-NEXT:  .LBB229_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB229_2 Depth=1
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
@@ -17149,27 +17785,27 @@ define i64 @atomicrmw_umin_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
-; RV32I-NEXT:    bnez a0, .LBB219_7
-; RV32I-NEXT:  .LBB219_2: # %atomicrmw.start
+; RV32I-NEXT:    bnez a0, .LBB229_7
+; RV32I-NEXT:  .LBB229_2: # %atomicrmw.start
 ; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT:    beq a5, s1, .LBB219_4
+; RV32I-NEXT:    beq a5, s1, .LBB229_4
 ; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB219_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB229_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s1, a5
-; RV32I-NEXT:    j .LBB219_5
-; RV32I-NEXT:  .LBB219_4: # in Loop: Header=BB219_2 Depth=1
+; RV32I-NEXT:    j .LBB229_5
+; RV32I-NEXT:  .LBB229_4: # in Loop: Header=BB229_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s2, a4
-; RV32I-NEXT:  .LBB219_5: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB219_2 Depth=1
+; RV32I-NEXT:  .LBB229_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB229_2 Depth=1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
-; RV32I-NEXT:    beqz a0, .LBB219_1
+; RV32I-NEXT:    beqz a0, .LBB229_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB219_2 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB229_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
-; RV32I-NEXT:    j .LBB219_1
-; RV32I-NEXT:  .LBB219_7: # %atomicrmw.end
+; RV32I-NEXT:    j .LBB229_1
+; RV32I-NEXT:  .LBB229_7: # %atomicrmw.end
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -17191,9 +17827,9 @@ define i64 @atomicrmw_umin_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a4, 0(a0)
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
-; RV32IA-NEXT:    j .LBB219_2
-; RV32IA-NEXT:  .LBB219_1: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB219_2 Depth=1
+; RV32IA-NEXT:    j .LBB229_2
+; RV32IA-NEXT:  .LBB229_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB229_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
@@ -17203,27 +17839,27 @@ define i64 @atomicrmw_umin_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
-; RV32IA-NEXT:    bnez a0, .LBB219_7
-; RV32IA-NEXT:  .LBB219_2: # %atomicrmw.start
+; RV32IA-NEXT:    bnez a0, .LBB229_7
+; RV32IA-NEXT:  .LBB229_2: # %atomicrmw.start
 ; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT:    beq a5, s1, .LBB219_4
+; RV32IA-NEXT:    beq a5, s1, .LBB229_4
 ; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB219_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB229_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s1, a5
-; RV32IA-NEXT:    j .LBB219_5
-; RV32IA-NEXT:  .LBB219_4: # in Loop: Header=BB219_2 Depth=1
+; RV32IA-NEXT:    j .LBB229_5
+; RV32IA-NEXT:  .LBB229_4: # in Loop: Header=BB229_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s2, a4
-; RV32IA-NEXT:  .LBB219_5: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB219_2 Depth=1
+; RV32IA-NEXT:  .LBB229_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB229_2 Depth=1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
-; RV32IA-NEXT:    beqz a0, .LBB219_1
+; RV32IA-NEXT:    beqz a0, .LBB229_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
-; RV32IA-NEXT:    # in Loop: Header=BB219_2 Depth=1
+; RV32IA-NEXT:    # in Loop: Header=BB229_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
-; RV32IA-NEXT:    j .LBB219_1
-; RV32IA-NEXT:  .LBB219_7: # %atomicrmw.end
+; RV32IA-NEXT:    j .LBB229_1
+; RV32IA-NEXT:  .LBB229_7: # %atomicrmw.end
 ; RV32IA-NEXT:    mv a0, a4
 ; RV32IA-NEXT:    mv a1, a5
 ; RV32IA-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -17242,9 +17878,9 @@ define i64 @atomicrmw_umin_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    j .LBB219_2
-; RV64I-NEXT:  .LBB219_1: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB219_2 Depth=1
+; RV64I-NEXT:    j .LBB229_2
+; RV64I-NEXT:  .LBB229_1: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB229_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    li a3, 5
@@ -17252,16 +17888,16 @@ define i64 @atomicrmw_umin_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
-; RV64I-NEXT:    bnez a0, .LBB219_4
-; RV64I-NEXT:  .LBB219_2: # %atomicrmw.start
+; RV64I-NEXT:    bnez a0, .LBB229_4
+; RV64I-NEXT:  .LBB229_2: # %atomicrmw.start
 ; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bgeu s1, a3, .LBB219_1
+; RV64I-NEXT:    bgeu s1, a3, .LBB229_1
 ; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB219_2 Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB229_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
-; RV64I-NEXT:    j .LBB219_1
-; RV64I-NEXT:  .LBB219_4: # %atomicrmw.end
+; RV64I-NEXT:    j .LBB229_1
+; RV64I-NEXT:  .LBB229_4: # %atomicrmw.end
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload