[llvm] 7294245 - [MBP] Avoid tail duplication if it can't bring benefit

Guozhi Wei via llvm-commits llvm-commits at lists.llvm.org
Fri Dec 6 09:56:16 PST 2019


Author: Guozhi Wei
Date: 2019-12-06T09:53:53-08:00
New Revision: 72942459d070cbfe6f3524e89c3ac37440be7890

URL: https://github.com/llvm/llvm-project/commit/72942459d070cbfe6f3524e89c3ac37440be7890
DIFF: https://github.com/llvm/llvm-project/commit/72942459d070cbfe6f3524e89c3ac37440be7890.diff

LOG: [MBP] Avoid tail duplication if it can't bring benefit

Current tail duplication integrated in bb layout is designed to increase the fallthrough from a BB's predecessor to its successor, but we have observed cases where duplication doesn't increase fallthrough, or brings too much size overhead.

To overcome these two issues in function canTailDuplicateUnplacedPreds I add two checks:

  make sure there is at least one duplication in the current work set.
  the number of duplications should not exceed the number of successors.

The modification in hasBetterLayoutPredecessor fixes a bug that potential predecessor must be at the bottom of a chain.

Differential Revision: https://reviews.llvm.org/D64376

Added: 
    llvm/test/CodeGen/PowerPC/no-duplicate.ll

Modified: 
    llvm/lib/CodeGen/MachineBlockPlacement.cpp
    llvm/test/CodeGen/AArch64/callbr-asm-obj-file.ll
    llvm/test/CodeGen/AArch64/swifterror.ll
    llvm/test/CodeGen/AArch64/tbz-tbnz.ll
    llvm/test/CodeGen/AMDGPU/loop_header_nopred.mir
    llvm/test/CodeGen/AMDGPU/uniform-cfg.ll
    llvm/test/CodeGen/PowerPC/block-placement.mir
    llvm/test/CodeGen/PowerPC/branch-opt.ll
    llvm/test/CodeGen/PowerPC/expand-contiguous-isel.ll
    llvm/test/CodeGen/RISCV/atomic-rmw.ll
    llvm/test/CodeGen/RISCV/remat.ll
    llvm/test/CodeGen/Thumb2/2010-02-11-phi-cycle.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/mve-float-loops.ll
    llvm/test/CodeGen/Thumb2/cbnz.ll
    llvm/test/CodeGen/X86/mmx-coalescing.ll
    llvm/test/CodeGen/X86/pr38795.ll
    llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll
    llvm/test/CodeGen/X86/reverse_branches.ll
    llvm/test/CodeGen/X86/shadow-stack.ll
    llvm/test/CodeGen/X86/speculative-load-hardening.ll
    llvm/test/CodeGen/X86/tail-dup-merge-loop-headers.ll
    llvm/test/CodeGen/X86/tail-dup-repeat.ll
    llvm/test/CodeGen/X86/tail-merge-after-mbp.mir
    llvm/test/CodeGen/X86/tail-opts.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/CodeGen/MachineBlockPlacement.cpp b/llvm/lib/CodeGen/MachineBlockPlacement.cpp
index a379283177fb..c2d9d1b9ac7f 100644
--- a/llvm/lib/CodeGen/MachineBlockPlacement.cpp
+++ b/llvm/lib/CodeGen/MachineBlockPlacement.cpp
@@ -1074,6 +1074,11 @@ bool MachineBlockPlacement::canTailDuplicateUnplacedPreds(
   if (!shouldTailDuplicate(Succ))
     return false;
 
+  // The result of canTailDuplicate.
+  bool Duplicate = true;
+  // Number of possible duplication.
+  unsigned int NumDup = 0;
+
   // For CFG checking.
   SmallPtrSet<const MachineBasicBlock *, 4> Successors(BB->succ_begin(),
                                                        BB->succ_end());
@@ -1120,9 +1125,50 @@ bool MachineBlockPlacement::canTailDuplicateUnplacedPreds(
         // to trellises created by tail-duplication, so we just look for the
         // CFG.
         continue;
-      return false;
+      Duplicate = false;
+      continue;
     }
+    NumDup++;
   }
+
+  // No possible duplication in current filter set.
+  if (NumDup == 0)
+    return false;
+
+  // This is mainly for function exit BB.
+  // The integrated tail duplication is really designed for increasing
+// fallthrough from predecessors of Succ to its successors. We may need
+// other mechanism to handle different cases.
+  if (Succ->succ_size() == 0)
+    return true;
+
+  // Plus the already placed predecessor.
+  NumDup++;
+
+  // If the duplication candidate has more unplaced predecessors than
+  // successors, the extra duplication can't bring more fallthrough.
+  //
+  //     Pred1 Pred2 Pred3
+  //         \   |   /
+  //          \  |  /
+  //           \ | /
+  //            Dup
+  //            / \
+  //           /   \
+  //       Succ1  Succ2
+  //
+  // In this example Dup has 2 successors and 3 predecessors, duplication of Dup
+  // can increase the fallthrough from Pred1 to Succ1 and from Pred2 to Succ2,
+  // but the duplication into Pred3 can't increase fallthrough.
+  //
+  // A small number of extra duplication may not hurt too much. We need a better
+  // heuristic to handle it.
+  //
+  // FIXME: we should selectively tail duplicate a BB into part of its
+  // predecessors.
+  if ((NumDup > Succ->succ_size()) || !Duplicate)
+    return false;
+
   return true;
 }
 
@@ -1418,9 +1464,10 @@ bool MachineBlockPlacement::hasBetterLayoutPredecessor(
   bool BadCFGConflict = false;
 
   for (MachineBasicBlock *Pred : Succ->predecessors()) {
-    if (Pred == Succ || BlockToChain[Pred] == &SuccChain ||
+    BlockChain *PredChain = BlockToChain[Pred];
+    if (Pred == Succ || PredChain == &SuccChain ||
         (BlockFilter && !BlockFilter->count(Pred)) ||
-        BlockToChain[Pred] == &Chain ||
+        PredChain == &Chain || Pred != *std::prev(PredChain->end()) ||
         // This check is redundant except for look ahead. This function is
         // called for lookahead by isProfitableToTailDup when BB hasn't been
         // placed yet.
@@ -1722,7 +1769,9 @@ void MachineBlockPlacement::buildChain(
     MachineBasicBlock* BestSucc = Result.BB;
     bool ShouldTailDup = Result.ShouldTailDup;
     if (allowTailDupPlacement())
-      ShouldTailDup |= (BestSucc && shouldTailDuplicate(BestSucc));
+      ShouldTailDup |= (BestSucc && canTailDuplicateUnplacedPreds(BB, BestSucc,
+                                                                  Chain,
+                                                                  BlockFilter));
 
     // If an immediate successor isn't available, look for the best viable
     // block among those we've identified as not violating the loop's CFG at

diff  --git a/llvm/test/CodeGen/AArch64/callbr-asm-obj-file.ll b/llvm/test/CodeGen/AArch64/callbr-asm-obj-file.ll
index 580679466140..345c3f9c4552 100644
--- a/llvm/test/CodeGen/AArch64/callbr-asm-obj-file.ll
+++ b/llvm/test/CodeGen/AArch64/callbr-asm-obj-file.ll
@@ -73,7 +73,7 @@ define hidden i32 @test2() local_unnamed_addr {
 ; CHECK-LABEL: test3:
 ; CHECK-LABEL: $d.9:
 ; CHECK-LABEL: $x.10:
-; CHECK-NEXT:    b #20 <$x.12+0x8>
+; CHECK-NEXT:    b #-20 <test3+0x18>
 ; CHECK-LABEL: $x.12:
 ; CHECK-NEXT:    b #4 <$x.12+0x4>
 ; CHECK-NEXT:    mov w0, wzr

diff  --git a/llvm/test/CodeGen/AArch64/swifterror.ll b/llvm/test/CodeGen/AArch64/swifterror.ll
index 93529d1ee86d..1eedb7620431 100644
--- a/llvm/test/CodeGen/AArch64/swifterror.ll
+++ b/llvm/test/CodeGen/AArch64/swifterror.ll
@@ -168,12 +168,12 @@ normal:
 define float @foo_loop(%swift_error** swifterror %error_ptr_ref, i32 %cc, float %cc2) {
 ; CHECK-APPLE-LABEL: foo_loop:
 ; CHECK-APPLE: mov x0, x21
+; CHECK-APPLE: fcmp
+; CHECK-APPLE: b.gt
 ; CHECK-APPLE: cbz
 ; CHECK-APPLE: mov w0, #16
 ; CHECK-APPLE: malloc
 ; CHECK-APPLE: strb w{{.*}}, [x0, #8]
-; CHECK-APPLE: fcmp
-; CHECK-APPLE: b.le
 ; CHECK-APPLE: mov x21, x0
 ; CHECK-APPLE: ret
 

diff  --git a/llvm/test/CodeGen/AArch64/tbz-tbnz.ll b/llvm/test/CodeGen/AArch64/tbz-tbnz.ll
index 5d6b3903fbbb..5018ce0b3878 100644
--- a/llvm/test/CodeGen/AArch64/tbz-tbnz.ll
+++ b/llvm/test/CodeGen/AArch64/tbz-tbnz.ll
@@ -153,7 +153,7 @@ if.then2:
   br i1 %tst3, label %if.then3, label %if.end
 
 ; CHECK: tst x0, x1, lsl #63
-; CHECK: b.ge
+; CHECK: b.lt
 
 if.then3:
   %shifted_op2 = shl i64 %val2, 62

diff  --git a/llvm/test/CodeGen/AMDGPU/loop_header_nopred.mir b/llvm/test/CodeGen/AMDGPU/loop_header_nopred.mir
index a1f23777e30e..4b01d1552624 100644
--- a/llvm/test/CodeGen/AMDGPU/loop_header_nopred.mir
+++ b/llvm/test/CodeGen/AMDGPU/loop_header_nopred.mir
@@ -9,9 +9,9 @@ name:            loop_header_nopred
 body:             |
   ; GCN-LABEL: name: loop_header_nopred
   ; GCN: bb.0:
-  ; GCN:   successors: %bb.2(0x40000000), %bb.3(0x40000000)
-  ; GCN:   S_CBRANCH_VCCZ %bb.3, implicit $vcc
-  ; GCN:   S_BRANCH %bb.2
+  ; GCN:   successors: %bb.1(0x80000000)
+  ; GCN:   S_INST_PREFETCH 1
+  ; GCN:   S_BRANCH %bb.1
   ; GCN: bb.6 (align 64):
   ; GCN:   successors: %bb.7(0x04000000), %bb.1(0x7c000000)
   ; GCN:   S_CBRANCH_VCCNZ %bb.7, implicit $vcc

diff  --git a/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll b/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll
index b5435ef14d64..965526340655 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll
@@ -330,13 +330,14 @@ endif:
 
 ; GCN-LABEL: {{^}}divergent_inside_uniform:
 ; GCN: s_cmp_lg_u32 s{{[0-9]+}}, 0
-; GCN: s_cbranch_scc1 [[ENDIF_LABEL:[0-9_A-Za-z]+]]
+; GCN: s_cbranch_scc0 [[IF_LABEL:[0-9_A-Za-z]+]]
+; GCN: [[ENDIF_LABEL:[0-9_A-Za-z]+]]:
+; GCN: [[IF_LABEL]]:
 ; GCN: v_cmp_gt_u32_e32 vcc, 16, v{{[0-9]+}}
 ; GCN: s_and_saveexec_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], vcc
 ; GCN: ; mask branch [[ENDIF_LABEL]]
 ; GCN: v_mov_b32_e32 [[ONE:v[0-9]+]], 1
 ; GCN: buffer_store_dword [[ONE]]
-; GCN: [[ENDIF_LABEL]]:
 ; GCN: s_endpgm
 define amdgpu_kernel void @divergent_inside_uniform(i32 addrspace(1)* %out, i32 %cond) {
 entry:

diff  --git a/llvm/test/CodeGen/PowerPC/block-placement.mir b/llvm/test/CodeGen/PowerPC/block-placement.mir
index 9dc911f785b5..34c4f1c7b695 100644
--- a/llvm/test/CodeGen/PowerPC/block-placement.mir
+++ b/llvm/test/CodeGen/PowerPC/block-placement.mir
@@ -210,13 +210,12 @@ body:             |
 
   ; CHECK:      bb.5.if.else.i:
   ; CHECK:        successors: %bb.11(0x80000000)
-  ; CHECK:        B %bb.11
-
-  ; CHECK:      bb.8.while.body.i (align 16):
-  ; CHECK:        successors: %bb.11(0x04000000), %bb.9(0x7c000000)
-  ; CHECK:        BCC 76, killed renamable $cr0, %bb.11
 
   ; CHECK:      bb.11:
   ; CHECK:        renamable $x3 = LI8 1
   ; CHECK-NEXT:   BLR8 implicit $lr8, implicit $rm, implicit killed $x3
+
+  ; CHECK:      bb.8.while.body.i (align 16):
+  ; CHECK:        successors: %bb.11(0x04000000), %bb.9(0x7c000000)
+  ; CHECK:        BCC 76, killed renamable $cr0, %bb.11
 ...

diff  --git a/llvm/test/CodeGen/PowerPC/branch-opt.ll b/llvm/test/CodeGen/PowerPC/branch-opt.ll
index 5e31270840b8..af75e761dbf4 100644
--- a/llvm/test/CodeGen/PowerPC/branch-opt.ll
+++ b/llvm/test/CodeGen/PowerPC/branch-opt.ll
@@ -8,13 +8,11 @@ target triple = "powerpc-unknown-linux-gnu"
 ; The last (whichever it is) should have a fallthrough exit, and the other three
 ; need an unconditional branch. No other block should have an unconditional
 ; branch to cond_next48
-; One of the blocks ends up with a loop exit block that gets a tail-duplicated copy
-; of %cond_next48, so there should only be two unconditional branches.
 
-;CHECK: b .LBB0_13
-;CHECK: b .LBB0_13
-;CHECK-NOT: b .LBB0_13
-;CHECK: .LBB0_13: # %cond_next48
+;CHECK: .LBB0_7: # %cond_next48
+;CHECK: b .LBB0_7
+;CHECK: b .LBB0_7
+;CHECK: b .LBB0_7
 
 define void @foo(i32 %W, i32 %X, i32 %Y, i32 %Z) {
 entry:

diff  --git a/llvm/test/CodeGen/PowerPC/expand-contiguous-isel.ll b/llvm/test/CodeGen/PowerPC/expand-contiguous-isel.ll
index 86caedcc09f0..1acb3e17a7f9 100644
--- a/llvm/test/CodeGen/PowerPC/expand-contiguous-isel.ll
+++ b/llvm/test/CodeGen/PowerPC/expand-contiguous-isel.ll
@@ -137,6 +137,7 @@ _ZNK4llvm9StringRef6substrEmm.exit:
 ; CHECK: bc 12, eq, [[TRUE:.LBB[0-9]+]]
 ; CHECK-NEXT: b [[SUCCESSOR:.LBB[0-9]+]]
 ; CHECK-NEXT: [[TRUE]]
+; CHECK-NEXT: # in Loop: Header
 ; CHECK-NEXT: addi {{r[0-9]+}}, {{r[0-9]+}}, 0
 ; CHECK-NEXT: [[SUCCESSOR]]
 }

diff  --git a/llvm/test/CodeGen/PowerPC/no-duplicate.ll b/llvm/test/CodeGen/PowerPC/no-duplicate.ll
new file mode 100644
index 000000000000..932ef1aa106b
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/no-duplicate.ll
@@ -0,0 +1,91 @@
+; RUN: llc -O2 < %s | FileCheck %s
+
+target triple = "powerpc64le-grtev4-linux-gnu"
+
+; No duplication of loop header into entry block.
+define void @no_duplicate1(i64 %a) {
+; CHECK-LABEL: no_duplicate1
+; CHECK:        mr 30, 3
+; CHECK-NEXT:   b .LBB0_2
+
+; CHECK:      .LBB0_2:
+; CHECK-NEXT:   # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:   cmpldi 30, 100
+; CHECK-NEXT:   bne 0, .LBB0_1
+entry:
+  br label %header
+
+header:
+  %ind = phi i64 [%a, %entry], [%val3, %latch]
+  %cond1 = icmp eq i64 %ind, 100
+  br i1 %cond1, label %middle, label %latch
+
+middle:
+  %condx = call i1 @foo()
+  %val1 = xor i64 %ind, 2
+  br label %latch
+
+latch:
+  %val2 = phi i64 [%ind, %header], [%val1, %middle]
+  %val3 = add i64 %val2, 1
+  %cond2 = call i1 @foo()
+  br i1 %cond2, label %end, label %header
+
+end:
+  ret void
+}
+
+; No duplication of loop header into latches.
+define void @no_duplicate2(i64 %a) {
+; CHECK-LABEL: no_duplicate2
+; CHECK:        mr 30, 3
+; CHECK-NEXT:   b .LBB1_2
+
+; CHECK:      .LBB1_2:
+; CHECK-NEXT:   # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:   cmpldi 30, 100
+; CHECK-NEXT:   bne 0, .LBB1_1
+
+; CHECK:      %latch2
+; CHECK:        b .LBB1_2
+
+; CHECK:      %latch3
+; CHECK:        b .LBB1_2
+entry:
+  br label %header
+
+header:
+  %ind = phi i64 [%a, %entry], [%val1, %latch1], [%val2, %latch2], [%val2, %latch3]
+  %cond1 = icmp eq i64 %ind, 100
+  br i1 %cond1, label %middle1, label %latch1
+
+latch1:
+  %cond2 = call i1 @foo()
+  %val1 = xor i64 %ind, 2
+  br i1 %cond2, label %end, label %header
+
+middle1:
+  %cond3 = call i1 @foo()
+  br i1 %cond3, label %latch1, label %middle2
+
+middle2:
+  %cond4 = call i1 @foo()
+  %val2 = add i64 %ind, 1
+  br i1 %cond4, label %latch2, label %latch3
+
+latch2:
+  call void @a()
+  br label %header
+
+latch3:
+  call void @b()
+  br label %header
+
+end:
+  ret void
+}
+
+
+declare i1 @foo()
+declare void @a()
+declare void @b()

diff  --git a/llvm/test/CodeGen/RISCV/atomic-rmw.ll b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
index c94dc658792a..a84d50b5dca1 100644
--- a/llvm/test/CodeGen/RISCV/atomic-rmw.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
@@ -11500,10 +11500,9 @@ define i32 @atomicrmw_max_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    addi s2, sp, 12
-; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bge s1, a3, .LBB145_3
+; RV32I-NEXT:    j .LBB145_2
 ; RV32I-NEXT:  .LBB145_1: # %atomicrmw.start
-; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB145_2 Depth=1
 ; RV32I-NEXT:    sw a3, 12(sp)
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s2
@@ -11512,11 +11511,12 @@ define i32 @atomicrmw_max_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 12(sp)
 ; RV32I-NEXT:    bnez a0, .LBB145_4
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB145_1 Depth=1
+; RV32I-NEXT:  .LBB145_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    blt s1, a3, .LBB145_1
-; RV32I-NEXT:  .LBB145_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB145_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB145_1
 ; RV32I-NEXT:  .LBB145_4: # %atomicrmw.end
@@ -11596,10 +11596,9 @@ define i32 @atomicrmw_max_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    addi s2, sp, 12
-; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bge s1, a3, .LBB146_3
+; RV32I-NEXT:    j .LBB146_2
 ; RV32I-NEXT:  .LBB146_1: # %atomicrmw.start
-; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB146_2 Depth=1
 ; RV32I-NEXT:    sw a3, 12(sp)
 ; RV32I-NEXT:    addi a3, zero, 2
 ; RV32I-NEXT:    addi a4, zero, 2
@@ -11608,11 +11607,12 @@ define i32 @atomicrmw_max_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 12(sp)
 ; RV32I-NEXT:    bnez a0, .LBB146_4
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB146_1 Depth=1
+; RV32I-NEXT:  .LBB146_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    blt s1, a3, .LBB146_1
-; RV32I-NEXT:  .LBB146_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB146_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB146_1
 ; RV32I-NEXT:  .LBB146_4: # %atomicrmw.end
@@ -11692,10 +11692,9 @@ define i32 @atomicrmw_max_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    addi s2, sp, 12
-; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bge s1, a3, .LBB147_3
+; RV32I-NEXT:    j .LBB147_2
 ; RV32I-NEXT:  .LBB147_1: # %atomicrmw.start
-; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB147_2 Depth=1
 ; RV32I-NEXT:    sw a3, 12(sp)
 ; RV32I-NEXT:    addi a3, zero, 3
 ; RV32I-NEXT:    mv a0, s0
@@ -11704,11 +11703,12 @@ define i32 @atomicrmw_max_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 12(sp)
 ; RV32I-NEXT:    bnez a0, .LBB147_4
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB147_1 Depth=1
+; RV32I-NEXT:  .LBB147_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    blt s1, a3, .LBB147_1
-; RV32I-NEXT:  .LBB147_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB147_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB147_1
 ; RV32I-NEXT:  .LBB147_4: # %atomicrmw.end
@@ -11788,10 +11788,9 @@ define i32 @atomicrmw_max_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    addi s2, sp, 12
-; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bge s1, a3, .LBB148_3
+; RV32I-NEXT:    j .LBB148_2
 ; RV32I-NEXT:  .LBB148_1: # %atomicrmw.start
-; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB148_2 Depth=1
 ; RV32I-NEXT:    sw a3, 12(sp)
 ; RV32I-NEXT:    addi a3, zero, 4
 ; RV32I-NEXT:    addi a4, zero, 2
@@ -11800,11 +11799,12 @@ define i32 @atomicrmw_max_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 12(sp)
 ; RV32I-NEXT:    bnez a0, .LBB148_4
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB148_1 Depth=1
+; RV32I-NEXT:  .LBB148_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    blt s1, a3, .LBB148_1
-; RV32I-NEXT:  .LBB148_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB148_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB148_1
 ; RV32I-NEXT:  .LBB148_4: # %atomicrmw.end
@@ -11884,10 +11884,9 @@ define i32 @atomicrmw_max_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    addi s2, sp, 12
-; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bge s1, a3, .LBB149_3
+; RV32I-NEXT:    j .LBB149_2
 ; RV32I-NEXT:  .LBB149_1: # %atomicrmw.start
-; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB149_2 Depth=1
 ; RV32I-NEXT:    sw a3, 12(sp)
 ; RV32I-NEXT:    addi a3, zero, 5
 ; RV32I-NEXT:    addi a4, zero, 5
@@ -11896,11 +11895,12 @@ define i32 @atomicrmw_max_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 12(sp)
 ; RV32I-NEXT:    bnez a0, .LBB149_4
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB149_1 Depth=1
+; RV32I-NEXT:  .LBB149_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    blt s1, a3, .LBB149_1
-; RV32I-NEXT:  .LBB149_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB149_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB149_1
 ; RV32I-NEXT:  .LBB149_4: # %atomicrmw.end
@@ -11980,10 +11980,9 @@ define i32 @atomicrmw_min_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    addi s2, sp, 12
-; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    blt s1, a3, .LBB150_3
+; RV32I-NEXT:    j .LBB150_2
 ; RV32I-NEXT:  .LBB150_1: # %atomicrmw.start
-; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB150_2 Depth=1
 ; RV32I-NEXT:    sw a3, 12(sp)
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s2
@@ -11992,11 +11991,12 @@ define i32 @atomicrmw_min_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 12(sp)
 ; RV32I-NEXT:    bnez a0, .LBB150_4
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB150_1 Depth=1
+; RV32I-NEXT:  .LBB150_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    bge s1, a3, .LBB150_1
-; RV32I-NEXT:  .LBB150_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB150_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB150_1
 ; RV32I-NEXT:  .LBB150_4: # %atomicrmw.end
@@ -12076,10 +12076,9 @@ define i32 @atomicrmw_min_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    addi s2, sp, 12
-; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    blt s1, a3, .LBB151_3
+; RV32I-NEXT:    j .LBB151_2
 ; RV32I-NEXT:  .LBB151_1: # %atomicrmw.start
-; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB151_2 Depth=1
 ; RV32I-NEXT:    sw a3, 12(sp)
 ; RV32I-NEXT:    addi a3, zero, 2
 ; RV32I-NEXT:    addi a4, zero, 2
@@ -12088,11 +12087,12 @@ define i32 @atomicrmw_min_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 12(sp)
 ; RV32I-NEXT:    bnez a0, .LBB151_4
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB151_1 Depth=1
+; RV32I-NEXT:  .LBB151_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    bge s1, a3, .LBB151_1
-; RV32I-NEXT:  .LBB151_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB151_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB151_1
 ; RV32I-NEXT:  .LBB151_4: # %atomicrmw.end
@@ -12172,10 +12172,9 @@ define i32 @atomicrmw_min_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    addi s2, sp, 12
-; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    blt s1, a3, .LBB152_3
+; RV32I-NEXT:    j .LBB152_2
 ; RV32I-NEXT:  .LBB152_1: # %atomicrmw.start
-; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB152_2 Depth=1
 ; RV32I-NEXT:    sw a3, 12(sp)
 ; RV32I-NEXT:    addi a3, zero, 3
 ; RV32I-NEXT:    mv a0, s0
@@ -12184,11 +12183,12 @@ define i32 @atomicrmw_min_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 12(sp)
 ; RV32I-NEXT:    bnez a0, .LBB152_4
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB152_1 Depth=1
+; RV32I-NEXT:  .LBB152_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    bge s1, a3, .LBB152_1
-; RV32I-NEXT:  .LBB152_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB152_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB152_1
 ; RV32I-NEXT:  .LBB152_4: # %atomicrmw.end
@@ -12268,10 +12268,9 @@ define i32 @atomicrmw_min_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    addi s2, sp, 12
-; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    blt s1, a3, .LBB153_3
+; RV32I-NEXT:    j .LBB153_2
 ; RV32I-NEXT:  .LBB153_1: # %atomicrmw.start
-; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB153_2 Depth=1
 ; RV32I-NEXT:    sw a3, 12(sp)
 ; RV32I-NEXT:    addi a3, zero, 4
 ; RV32I-NEXT:    addi a4, zero, 2
@@ -12280,11 +12279,12 @@ define i32 @atomicrmw_min_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 12(sp)
 ; RV32I-NEXT:    bnez a0, .LBB153_4
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB153_1 Depth=1
+; RV32I-NEXT:  .LBB153_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    bge s1, a3, .LBB153_1
-; RV32I-NEXT:  .LBB153_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB153_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB153_1
 ; RV32I-NEXT:  .LBB153_4: # %atomicrmw.end
@@ -12364,10 +12364,9 @@ define i32 @atomicrmw_min_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    addi s2, sp, 12
-; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    blt s1, a3, .LBB154_3
+; RV32I-NEXT:    j .LBB154_2
 ; RV32I-NEXT:  .LBB154_1: # %atomicrmw.start
-; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB154_2 Depth=1
 ; RV32I-NEXT:    sw a3, 12(sp)
 ; RV32I-NEXT:    addi a3, zero, 5
 ; RV32I-NEXT:    addi a4, zero, 5
@@ -12376,11 +12375,12 @@ define i32 @atomicrmw_min_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 12(sp)
 ; RV32I-NEXT:    bnez a0, .LBB154_4
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB154_1 Depth=1
+; RV32I-NEXT:  .LBB154_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    bge s1, a3, .LBB154_1
-; RV32I-NEXT:  .LBB154_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB154_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB154_1
 ; RV32I-NEXT:  .LBB154_4: # %atomicrmw.end
@@ -12460,10 +12460,9 @@ define i32 @atomicrmw_umax_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    addi s2, sp, 12
-; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bgeu s1, a3, .LBB155_3
+; RV32I-NEXT:    j .LBB155_2
 ; RV32I-NEXT:  .LBB155_1: # %atomicrmw.start
-; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB155_2 Depth=1
 ; RV32I-NEXT:    sw a3, 12(sp)
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s2
@@ -12472,11 +12471,12 @@ define i32 @atomicrmw_umax_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 12(sp)
 ; RV32I-NEXT:    bnez a0, .LBB155_4
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB155_1 Depth=1
+; RV32I-NEXT:  .LBB155_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    bltu s1, a3, .LBB155_1
-; RV32I-NEXT:  .LBB155_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB155_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB155_1
 ; RV32I-NEXT:  .LBB155_4: # %atomicrmw.end
@@ -12556,10 +12556,9 @@ define i32 @atomicrmw_umax_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    addi s2, sp, 12
-; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bgeu s1, a3, .LBB156_3
+; RV32I-NEXT:    j .LBB156_2
 ; RV32I-NEXT:  .LBB156_1: # %atomicrmw.start
-; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB156_2 Depth=1
 ; RV32I-NEXT:    sw a3, 12(sp)
 ; RV32I-NEXT:    addi a3, zero, 2
 ; RV32I-NEXT:    addi a4, zero, 2
@@ -12568,11 +12567,12 @@ define i32 @atomicrmw_umax_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 12(sp)
 ; RV32I-NEXT:    bnez a0, .LBB156_4
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB156_1 Depth=1
+; RV32I-NEXT:  .LBB156_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    bltu s1, a3, .LBB156_1
-; RV32I-NEXT:  .LBB156_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB156_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB156_1
 ; RV32I-NEXT:  .LBB156_4: # %atomicrmw.end
@@ -12652,10 +12652,9 @@ define i32 @atomicrmw_umax_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    addi s2, sp, 12
-; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bgeu s1, a3, .LBB157_3
+; RV32I-NEXT:    j .LBB157_2
 ; RV32I-NEXT:  .LBB157_1: # %atomicrmw.start
-; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB157_2 Depth=1
 ; RV32I-NEXT:    sw a3, 12(sp)
 ; RV32I-NEXT:    addi a3, zero, 3
 ; RV32I-NEXT:    mv a0, s0
@@ -12664,11 +12663,12 @@ define i32 @atomicrmw_umax_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 12(sp)
 ; RV32I-NEXT:    bnez a0, .LBB157_4
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB157_1 Depth=1
+; RV32I-NEXT:  .LBB157_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    bltu s1, a3, .LBB157_1
-; RV32I-NEXT:  .LBB157_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB157_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB157_1
 ; RV32I-NEXT:  .LBB157_4: # %atomicrmw.end
@@ -12748,10 +12748,9 @@ define i32 @atomicrmw_umax_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    addi s2, sp, 12
-; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bgeu s1, a3, .LBB158_3
+; RV32I-NEXT:    j .LBB158_2
 ; RV32I-NEXT:  .LBB158_1: # %atomicrmw.start
-; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB158_2 Depth=1
 ; RV32I-NEXT:    sw a3, 12(sp)
 ; RV32I-NEXT:    addi a3, zero, 4
 ; RV32I-NEXT:    addi a4, zero, 2
@@ -12760,11 +12759,12 @@ define i32 @atomicrmw_umax_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 12(sp)
 ; RV32I-NEXT:    bnez a0, .LBB158_4
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB158_1 Depth=1
+; RV32I-NEXT:  .LBB158_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    bltu s1, a3, .LBB158_1
-; RV32I-NEXT:  .LBB158_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB158_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB158_1
 ; RV32I-NEXT:  .LBB158_4: # %atomicrmw.end
@@ -12844,10 +12844,9 @@ define i32 @atomicrmw_umax_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    addi s2, sp, 12
-; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bgeu s1, a3, .LBB159_3
+; RV32I-NEXT:    j .LBB159_2
 ; RV32I-NEXT:  .LBB159_1: # %atomicrmw.start
-; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB159_2 Depth=1
 ; RV32I-NEXT:    sw a3, 12(sp)
 ; RV32I-NEXT:    addi a3, zero, 5
 ; RV32I-NEXT:    addi a4, zero, 5
@@ -12856,11 +12855,12 @@ define i32 @atomicrmw_umax_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 12(sp)
 ; RV32I-NEXT:    bnez a0, .LBB159_4
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB159_1 Depth=1
+; RV32I-NEXT:  .LBB159_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    bltu s1, a3, .LBB159_1
-; RV32I-NEXT:  .LBB159_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB159_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB159_1
 ; RV32I-NEXT:  .LBB159_4: # %atomicrmw.end
@@ -12940,10 +12940,9 @@ define i32 @atomicrmw_umin_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    addi s2, sp, 12
-; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bltu s1, a3, .LBB160_3
+; RV32I-NEXT:    j .LBB160_2
 ; RV32I-NEXT:  .LBB160_1: # %atomicrmw.start
-; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB160_2 Depth=1
 ; RV32I-NEXT:    sw a3, 12(sp)
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s2
@@ -12952,11 +12951,12 @@ define i32 @atomicrmw_umin_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 12(sp)
 ; RV32I-NEXT:    bnez a0, .LBB160_4
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB160_1 Depth=1
+; RV32I-NEXT:  .LBB160_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    bgeu s1, a3, .LBB160_1
-; RV32I-NEXT:  .LBB160_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB160_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB160_1
 ; RV32I-NEXT:  .LBB160_4: # %atomicrmw.end
@@ -13036,10 +13036,9 @@ define i32 @atomicrmw_umin_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    addi s2, sp, 12
-; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bltu s1, a3, .LBB161_3
+; RV32I-NEXT:    j .LBB161_2
 ; RV32I-NEXT:  .LBB161_1: # %atomicrmw.start
-; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB161_2 Depth=1
 ; RV32I-NEXT:    sw a3, 12(sp)
 ; RV32I-NEXT:    addi a3, zero, 2
 ; RV32I-NEXT:    addi a4, zero, 2
@@ -13048,11 +13047,12 @@ define i32 @atomicrmw_umin_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 12(sp)
 ; RV32I-NEXT:    bnez a0, .LBB161_4
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB161_1 Depth=1
+; RV32I-NEXT:  .LBB161_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    bgeu s1, a3, .LBB161_1
-; RV32I-NEXT:  .LBB161_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB161_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB161_1
 ; RV32I-NEXT:  .LBB161_4: # %atomicrmw.end
@@ -13132,10 +13132,9 @@ define i32 @atomicrmw_umin_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    addi s2, sp, 12
-; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bltu s1, a3, .LBB162_3
+; RV32I-NEXT:    j .LBB162_2
 ; RV32I-NEXT:  .LBB162_1: # %atomicrmw.start
-; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB162_2 Depth=1
 ; RV32I-NEXT:    sw a3, 12(sp)
 ; RV32I-NEXT:    addi a3, zero, 3
 ; RV32I-NEXT:    mv a0, s0
@@ -13144,11 +13143,12 @@ define i32 @atomicrmw_umin_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 12(sp)
 ; RV32I-NEXT:    bnez a0, .LBB162_4
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB162_1 Depth=1
+; RV32I-NEXT:  .LBB162_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    bgeu s1, a3, .LBB162_1
-; RV32I-NEXT:  .LBB162_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB162_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB162_1
 ; RV32I-NEXT:  .LBB162_4: # %atomicrmw.end
@@ -13228,10 +13228,9 @@ define i32 @atomicrmw_umin_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    addi s2, sp, 12
-; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bltu s1, a3, .LBB163_3
+; RV32I-NEXT:    j .LBB163_2
 ; RV32I-NEXT:  .LBB163_1: # %atomicrmw.start
-; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB163_2 Depth=1
 ; RV32I-NEXT:    sw a3, 12(sp)
 ; RV32I-NEXT:    addi a3, zero, 4
 ; RV32I-NEXT:    addi a4, zero, 2
@@ -13240,11 +13239,12 @@ define i32 @atomicrmw_umin_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 12(sp)
 ; RV32I-NEXT:    bnez a0, .LBB163_4
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB163_1 Depth=1
+; RV32I-NEXT:  .LBB163_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    bgeu s1, a3, .LBB163_1
-; RV32I-NEXT:  .LBB163_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB163_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB163_1
 ; RV32I-NEXT:  .LBB163_4: # %atomicrmw.end
@@ -13324,10 +13324,9 @@ define i32 @atomicrmw_umin_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    lw a3, 0(a0)
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    addi s2, sp, 12
-; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:    bltu s1, a3, .LBB164_3
+; RV32I-NEXT:    j .LBB164_2
 ; RV32I-NEXT:  .LBB164_1: # %atomicrmw.start
-; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB164_2 Depth=1
 ; RV32I-NEXT:    sw a3, 12(sp)
 ; RV32I-NEXT:    addi a3, zero, 5
 ; RV32I-NEXT:    addi a4, zero, 5
@@ -13336,11 +13335,12 @@ define i32 @atomicrmw_umin_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    call __atomic_compare_exchange_4
 ; RV32I-NEXT:    lw a3, 12(sp)
 ; RV32I-NEXT:    bnez a0, .LBB164_4
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV32I-NEXT:    # in Loop: Header=BB164_1 Depth=1
+; RV32I-NEXT:  .LBB164_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    bgeu s1, a3, .LBB164_1
-; RV32I-NEXT:  .LBB164_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB164_2 Depth=1
 ; RV32I-NEXT:    mv a2, s1
 ; RV32I-NEXT:    j .LBB164_1
 ; RV32I-NEXT:  .LBB164_4: # %atomicrmw.end
@@ -14828,9 +14828,9 @@ define i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, sp
-; RV32I-NEXT:    bne a5, s1, .LBB200_3
-; RV32I-NEXT:    j .LBB200_4
+; RV32I-NEXT:    j .LBB200_2
 ; RV32I-NEXT:  .LBB200_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB200_2 Depth=1
 ; RV32I-NEXT:    sw a4, 0(sp)
 ; RV32I-NEXT:    sw a5, 4(sp)
 ; RV32I-NEXT:    mv a0, s0
@@ -14841,18 +14841,22 @@ define i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a5, 4(sp)
 ; RV32I-NEXT:    lw a4, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB200_7
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:  .LBB200_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    beq a5, s1, .LBB200_4
-; RV32I-NEXT:  .LBB200_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB200_2 Depth=1
 ; RV32I-NEXT:    slt a0, s1, a5
 ; RV32I-NEXT:    j .LBB200_5
 ; RV32I-NEXT:  .LBB200_4:
 ; RV32I-NEXT:    sltu a0, s2, a4
 ; RV32I-NEXT:  .LBB200_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB200_2 Depth=1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
 ; RV32I-NEXT:    bnez a0, .LBB200_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB200_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
 ; RV32I-NEXT:    j .LBB200_1
@@ -14881,9 +14885,9 @@ define i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
 ; RV32IA-NEXT:    mv s3, sp
-; RV32IA-NEXT:    bne a5, s1, .LBB200_3
-; RV32IA-NEXT:    j .LBB200_4
+; RV32IA-NEXT:    j .LBB200_2
 ; RV32IA-NEXT:  .LBB200_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB200_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 0(sp)
 ; RV32IA-NEXT:    sw a5, 4(sp)
 ; RV32IA-NEXT:    mv a0, s0
@@ -14894,18 +14898,22 @@ define i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a5, 4(sp)
 ; RV32IA-NEXT:    lw a4, 0(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB200_7
-; RV32IA-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32IA-NEXT:  .LBB200_2: # %atomicrmw.start
+; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    beq a5, s1, .LBB200_4
-; RV32IA-NEXT:  .LBB200_3: # %atomicrmw.start
+; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB200_2 Depth=1
 ; RV32IA-NEXT:    slt a0, s1, a5
 ; RV32IA-NEXT:    j .LBB200_5
 ; RV32IA-NEXT:  .LBB200_4:
 ; RV32IA-NEXT:    sltu a0, s2, a4
 ; RV32IA-NEXT:  .LBB200_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB200_2 Depth=1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
 ; RV32IA-NEXT:    bnez a0, .LBB200_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB200_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
 ; RV32IA-NEXT:    j .LBB200_1
@@ -14931,10 +14939,9 @@ define i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    addi s2, sp, 8
-; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bge s1, a3, .LBB200_3
+; RV64I-NEXT:    j .LBB200_2
 ; RV64I-NEXT:  .LBB200_1: # %atomicrmw.start
-; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB200_2 Depth=1
 ; RV64I-NEXT:    sd a3, 8(sp)
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a1, s2
@@ -14943,11 +14950,12 @@ define i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 8(sp)
 ; RV64I-NEXT:    bnez a0, .LBB200_4
-; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB200_1 Depth=1
+; RV64I-NEXT:  .LBB200_2: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
 ; RV64I-NEXT:    blt s1, a3, .LBB200_1
-; RV64I-NEXT:  .LBB200_3: # %atomicrmw.start
+; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB200_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB200_1
 ; RV64I-NEXT:  .LBB200_4: # %atomicrmw.end
@@ -14982,9 +14990,9 @@ define i64 @atomicrmw_max_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, sp
-; RV32I-NEXT:    bne a5, s1, .LBB201_3
-; RV32I-NEXT:    j .LBB201_4
+; RV32I-NEXT:    j .LBB201_2
 ; RV32I-NEXT:  .LBB201_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB201_2 Depth=1
 ; RV32I-NEXT:    sw a4, 0(sp)
 ; RV32I-NEXT:    sw a5, 4(sp)
 ; RV32I-NEXT:    addi a4, zero, 2
@@ -14995,18 +15003,22 @@ define i64 @atomicrmw_max_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a5, 4(sp)
 ; RV32I-NEXT:    lw a4, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB201_7
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:  .LBB201_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    beq a5, s1, .LBB201_4
-; RV32I-NEXT:  .LBB201_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB201_2 Depth=1
 ; RV32I-NEXT:    slt a0, s1, a5
 ; RV32I-NEXT:    j .LBB201_5
 ; RV32I-NEXT:  .LBB201_4:
 ; RV32I-NEXT:    sltu a0, s2, a4
 ; RV32I-NEXT:  .LBB201_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB201_2 Depth=1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
 ; RV32I-NEXT:    bnez a0, .LBB201_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB201_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
 ; RV32I-NEXT:    j .LBB201_1
@@ -15035,9 +15047,9 @@ define i64 @atomicrmw_max_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
 ; RV32IA-NEXT:    mv s3, sp
-; RV32IA-NEXT:    bne a5, s1, .LBB201_3
-; RV32IA-NEXT:    j .LBB201_4
+; RV32IA-NEXT:    j .LBB201_2
 ; RV32IA-NEXT:  .LBB201_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB201_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 0(sp)
 ; RV32IA-NEXT:    sw a5, 4(sp)
 ; RV32IA-NEXT:    addi a4, zero, 2
@@ -15048,18 +15060,22 @@ define i64 @atomicrmw_max_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a5, 4(sp)
 ; RV32IA-NEXT:    lw a4, 0(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB201_7
-; RV32IA-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32IA-NEXT:  .LBB201_2: # %atomicrmw.start
+; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    beq a5, s1, .LBB201_4
-; RV32IA-NEXT:  .LBB201_3: # %atomicrmw.start
+; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB201_2 Depth=1
 ; RV32IA-NEXT:    slt a0, s1, a5
 ; RV32IA-NEXT:    j .LBB201_5
 ; RV32IA-NEXT:  .LBB201_4:
 ; RV32IA-NEXT:    sltu a0, s2, a4
 ; RV32IA-NEXT:  .LBB201_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB201_2 Depth=1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
 ; RV32IA-NEXT:    bnez a0, .LBB201_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB201_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
 ; RV32IA-NEXT:    j .LBB201_1
@@ -15085,10 +15101,9 @@ define i64 @atomicrmw_max_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    addi s2, sp, 8
-; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bge s1, a3, .LBB201_3
+; RV64I-NEXT:    j .LBB201_2
 ; RV64I-NEXT:  .LBB201_1: # %atomicrmw.start
-; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB201_2 Depth=1
 ; RV64I-NEXT:    sd a3, 8(sp)
 ; RV64I-NEXT:    addi a3, zero, 2
 ; RV64I-NEXT:    addi a4, zero, 2
@@ -15097,11 +15112,12 @@ define i64 @atomicrmw_max_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 8(sp)
 ; RV64I-NEXT:    bnez a0, .LBB201_4
-; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB201_1 Depth=1
+; RV64I-NEXT:  .LBB201_2: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
 ; RV64I-NEXT:    blt s1, a3, .LBB201_1
-; RV64I-NEXT:  .LBB201_3: # %atomicrmw.start
+; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB201_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB201_1
 ; RV64I-NEXT:  .LBB201_4: # %atomicrmw.end
@@ -15136,9 +15152,9 @@ define i64 @atomicrmw_max_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, sp
-; RV32I-NEXT:    bne a5, s1, .LBB202_3
-; RV32I-NEXT:    j .LBB202_4
+; RV32I-NEXT:    j .LBB202_2
 ; RV32I-NEXT:  .LBB202_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB202_2 Depth=1
 ; RV32I-NEXT:    sw a4, 0(sp)
 ; RV32I-NEXT:    sw a5, 4(sp)
 ; RV32I-NEXT:    addi a4, zero, 3
@@ -15149,18 +15165,22 @@ define i64 @atomicrmw_max_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a5, 4(sp)
 ; RV32I-NEXT:    lw a4, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB202_7
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:  .LBB202_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    beq a5, s1, .LBB202_4
-; RV32I-NEXT:  .LBB202_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB202_2 Depth=1
 ; RV32I-NEXT:    slt a0, s1, a5
 ; RV32I-NEXT:    j .LBB202_5
 ; RV32I-NEXT:  .LBB202_4:
 ; RV32I-NEXT:    sltu a0, s2, a4
 ; RV32I-NEXT:  .LBB202_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB202_2 Depth=1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
 ; RV32I-NEXT:    bnez a0, .LBB202_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB202_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
 ; RV32I-NEXT:    j .LBB202_1
@@ -15189,9 +15209,9 @@ define i64 @atomicrmw_max_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
 ; RV32IA-NEXT:    mv s3, sp
-; RV32IA-NEXT:    bne a5, s1, .LBB202_3
-; RV32IA-NEXT:    j .LBB202_4
+; RV32IA-NEXT:    j .LBB202_2
 ; RV32IA-NEXT:  .LBB202_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB202_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 0(sp)
 ; RV32IA-NEXT:    sw a5, 4(sp)
 ; RV32IA-NEXT:    addi a4, zero, 3
@@ -15202,18 +15222,22 @@ define i64 @atomicrmw_max_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a5, 4(sp)
 ; RV32IA-NEXT:    lw a4, 0(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB202_7
-; RV32IA-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32IA-NEXT:  .LBB202_2: # %atomicrmw.start
+; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    beq a5, s1, .LBB202_4
-; RV32IA-NEXT:  .LBB202_3: # %atomicrmw.start
+; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB202_2 Depth=1
 ; RV32IA-NEXT:    slt a0, s1, a5
 ; RV32IA-NEXT:    j .LBB202_5
 ; RV32IA-NEXT:  .LBB202_4:
 ; RV32IA-NEXT:    sltu a0, s2, a4
 ; RV32IA-NEXT:  .LBB202_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB202_2 Depth=1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
 ; RV32IA-NEXT:    bnez a0, .LBB202_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB202_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
 ; RV32IA-NEXT:    j .LBB202_1
@@ -15239,10 +15263,9 @@ define i64 @atomicrmw_max_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    addi s2, sp, 8
-; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bge s1, a3, .LBB202_3
+; RV64I-NEXT:    j .LBB202_2
 ; RV64I-NEXT:  .LBB202_1: # %atomicrmw.start
-; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB202_2 Depth=1
 ; RV64I-NEXT:    sd a3, 8(sp)
 ; RV64I-NEXT:    addi a3, zero, 3
 ; RV64I-NEXT:    mv a0, s0
@@ -15251,11 +15274,12 @@ define i64 @atomicrmw_max_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 8(sp)
 ; RV64I-NEXT:    bnez a0, .LBB202_4
-; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB202_1 Depth=1
+; RV64I-NEXT:  .LBB202_2: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
 ; RV64I-NEXT:    blt s1, a3, .LBB202_1
-; RV64I-NEXT:  .LBB202_3: # %atomicrmw.start
+; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB202_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB202_1
 ; RV64I-NEXT:  .LBB202_4: # %atomicrmw.end
@@ -15290,9 +15314,9 @@ define i64 @atomicrmw_max_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, sp
-; RV32I-NEXT:    bne a5, s1, .LBB203_3
-; RV32I-NEXT:    j .LBB203_4
+; RV32I-NEXT:    j .LBB203_2
 ; RV32I-NEXT:  .LBB203_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB203_2 Depth=1
 ; RV32I-NEXT:    sw a4, 0(sp)
 ; RV32I-NEXT:    sw a5, 4(sp)
 ; RV32I-NEXT:    addi a4, zero, 4
@@ -15303,18 +15327,22 @@ define i64 @atomicrmw_max_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a5, 4(sp)
 ; RV32I-NEXT:    lw a4, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB203_7
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:  .LBB203_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    beq a5, s1, .LBB203_4
-; RV32I-NEXT:  .LBB203_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB203_2 Depth=1
 ; RV32I-NEXT:    slt a0, s1, a5
 ; RV32I-NEXT:    j .LBB203_5
 ; RV32I-NEXT:  .LBB203_4:
 ; RV32I-NEXT:    sltu a0, s2, a4
 ; RV32I-NEXT:  .LBB203_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB203_2 Depth=1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
 ; RV32I-NEXT:    bnez a0, .LBB203_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB203_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
 ; RV32I-NEXT:    j .LBB203_1
@@ -15343,9 +15371,9 @@ define i64 @atomicrmw_max_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
 ; RV32IA-NEXT:    mv s3, sp
-; RV32IA-NEXT:    bne a5, s1, .LBB203_3
-; RV32IA-NEXT:    j .LBB203_4
+; RV32IA-NEXT:    j .LBB203_2
 ; RV32IA-NEXT:  .LBB203_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB203_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 0(sp)
 ; RV32IA-NEXT:    sw a5, 4(sp)
 ; RV32IA-NEXT:    addi a4, zero, 4
@@ -15356,18 +15384,22 @@ define i64 @atomicrmw_max_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a5, 4(sp)
 ; RV32IA-NEXT:    lw a4, 0(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB203_7
-; RV32IA-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32IA-NEXT:  .LBB203_2: # %atomicrmw.start
+; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    beq a5, s1, .LBB203_4
-; RV32IA-NEXT:  .LBB203_3: # %atomicrmw.start
+; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB203_2 Depth=1
 ; RV32IA-NEXT:    slt a0, s1, a5
 ; RV32IA-NEXT:    j .LBB203_5
 ; RV32IA-NEXT:  .LBB203_4:
 ; RV32IA-NEXT:    sltu a0, s2, a4
 ; RV32IA-NEXT:  .LBB203_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB203_2 Depth=1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
 ; RV32IA-NEXT:    bnez a0, .LBB203_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB203_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
 ; RV32IA-NEXT:    j .LBB203_1
@@ -15393,10 +15425,9 @@ define i64 @atomicrmw_max_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    addi s2, sp, 8
-; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bge s1, a3, .LBB203_3
+; RV64I-NEXT:    j .LBB203_2
 ; RV64I-NEXT:  .LBB203_1: # %atomicrmw.start
-; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB203_2 Depth=1
 ; RV64I-NEXT:    sd a3, 8(sp)
 ; RV64I-NEXT:    addi a3, zero, 4
 ; RV64I-NEXT:    addi a4, zero, 2
@@ -15405,11 +15436,12 @@ define i64 @atomicrmw_max_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 8(sp)
 ; RV64I-NEXT:    bnez a0, .LBB203_4
-; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB203_1 Depth=1
+; RV64I-NEXT:  .LBB203_2: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
 ; RV64I-NEXT:    blt s1, a3, .LBB203_1
-; RV64I-NEXT:  .LBB203_3: # %atomicrmw.start
+; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB203_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB203_1
 ; RV64I-NEXT:  .LBB203_4: # %atomicrmw.end
@@ -15444,9 +15476,9 @@ define i64 @atomicrmw_max_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, sp
-; RV32I-NEXT:    bne a5, s1, .LBB204_3
-; RV32I-NEXT:    j .LBB204_4
+; RV32I-NEXT:    j .LBB204_2
 ; RV32I-NEXT:  .LBB204_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB204_2 Depth=1
 ; RV32I-NEXT:    sw a4, 0(sp)
 ; RV32I-NEXT:    sw a5, 4(sp)
 ; RV32I-NEXT:    addi a4, zero, 5
@@ -15457,18 +15489,22 @@ define i64 @atomicrmw_max_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a5, 4(sp)
 ; RV32I-NEXT:    lw a4, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB204_7
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:  .LBB204_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    beq a5, s1, .LBB204_4
-; RV32I-NEXT:  .LBB204_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB204_2 Depth=1
 ; RV32I-NEXT:    slt a0, s1, a5
 ; RV32I-NEXT:    j .LBB204_5
 ; RV32I-NEXT:  .LBB204_4:
 ; RV32I-NEXT:    sltu a0, s2, a4
 ; RV32I-NEXT:  .LBB204_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB204_2 Depth=1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
 ; RV32I-NEXT:    bnez a0, .LBB204_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB204_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
 ; RV32I-NEXT:    j .LBB204_1
@@ -15497,9 +15533,9 @@ define i64 @atomicrmw_max_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
 ; RV32IA-NEXT:    mv s3, sp
-; RV32IA-NEXT:    bne a5, s1, .LBB204_3
-; RV32IA-NEXT:    j .LBB204_4
+; RV32IA-NEXT:    j .LBB204_2
 ; RV32IA-NEXT:  .LBB204_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB204_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 0(sp)
 ; RV32IA-NEXT:    sw a5, 4(sp)
 ; RV32IA-NEXT:    addi a4, zero, 5
@@ -15510,18 +15546,22 @@ define i64 @atomicrmw_max_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a5, 4(sp)
 ; RV32IA-NEXT:    lw a4, 0(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB204_7
-; RV32IA-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32IA-NEXT:  .LBB204_2: # %atomicrmw.start
+; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    beq a5, s1, .LBB204_4
-; RV32IA-NEXT:  .LBB204_3: # %atomicrmw.start
+; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB204_2 Depth=1
 ; RV32IA-NEXT:    slt a0, s1, a5
 ; RV32IA-NEXT:    j .LBB204_5
 ; RV32IA-NEXT:  .LBB204_4:
 ; RV32IA-NEXT:    sltu a0, s2, a4
 ; RV32IA-NEXT:  .LBB204_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB204_2 Depth=1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
 ; RV32IA-NEXT:    bnez a0, .LBB204_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB204_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
 ; RV32IA-NEXT:    j .LBB204_1
@@ -15547,10 +15587,9 @@ define i64 @atomicrmw_max_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    addi s2, sp, 8
-; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bge s1, a3, .LBB204_3
+; RV64I-NEXT:    j .LBB204_2
 ; RV64I-NEXT:  .LBB204_1: # %atomicrmw.start
-; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB204_2 Depth=1
 ; RV64I-NEXT:    sd a3, 8(sp)
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    addi a4, zero, 5
@@ -15559,11 +15598,12 @@ define i64 @atomicrmw_max_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 8(sp)
 ; RV64I-NEXT:    bnez a0, .LBB204_4
-; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB204_1 Depth=1
+; RV64I-NEXT:  .LBB204_2: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
 ; RV64I-NEXT:    blt s1, a3, .LBB204_1
-; RV64I-NEXT:  .LBB204_3: # %atomicrmw.start
+; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB204_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB204_1
 ; RV64I-NEXT:  .LBB204_4: # %atomicrmw.end
@@ -15598,9 +15638,9 @@ define i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, sp
-; RV32I-NEXT:    bne a5, s1, .LBB205_3
-; RV32I-NEXT:    j .LBB205_4
+; RV32I-NEXT:    j .LBB205_2
 ; RV32I-NEXT:  .LBB205_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB205_2 Depth=1
 ; RV32I-NEXT:    sw a4, 0(sp)
 ; RV32I-NEXT:    sw a5, 4(sp)
 ; RV32I-NEXT:    mv a0, s0
@@ -15611,19 +15651,23 @@ define i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a5, 4(sp)
 ; RV32I-NEXT:    lw a4, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB205_7
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:  .LBB205_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    beq a5, s1, .LBB205_4
-; RV32I-NEXT:  .LBB205_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB205_2 Depth=1
 ; RV32I-NEXT:    slt a0, s1, a5
 ; RV32I-NEXT:    j .LBB205_5
 ; RV32I-NEXT:  .LBB205_4:
 ; RV32I-NEXT:    sltu a0, s2, a4
 ; RV32I-NEXT:  .LBB205_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB205_2 Depth=1
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
 ; RV32I-NEXT:    bnez a0, .LBB205_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB205_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
 ; RV32I-NEXT:    j .LBB205_1
@@ -15652,9 +15696,9 @@ define i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
 ; RV32IA-NEXT:    mv s3, sp
-; RV32IA-NEXT:    bne a5, s1, .LBB205_3
-; RV32IA-NEXT:    j .LBB205_4
+; RV32IA-NEXT:    j .LBB205_2
 ; RV32IA-NEXT:  .LBB205_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB205_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 0(sp)
 ; RV32IA-NEXT:    sw a5, 4(sp)
 ; RV32IA-NEXT:    mv a0, s0
@@ -15665,19 +15709,23 @@ define i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a5, 4(sp)
 ; RV32IA-NEXT:    lw a4, 0(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB205_7
-; RV32IA-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32IA-NEXT:  .LBB205_2: # %atomicrmw.start
+; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    beq a5, s1, .LBB205_4
-; RV32IA-NEXT:  .LBB205_3: # %atomicrmw.start
+; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB205_2 Depth=1
 ; RV32IA-NEXT:    slt a0, s1, a5
 ; RV32IA-NEXT:    j .LBB205_5
 ; RV32IA-NEXT:  .LBB205_4:
 ; RV32IA-NEXT:    sltu a0, s2, a4
 ; RV32IA-NEXT:  .LBB205_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB205_2 Depth=1
 ; RV32IA-NEXT:    xori a0, a0, 1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
 ; RV32IA-NEXT:    bnez a0, .LBB205_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB205_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
 ; RV32IA-NEXT:    j .LBB205_1
@@ -15703,10 +15751,9 @@ define i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    addi s2, sp, 8
-; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    blt s1, a3, .LBB205_3
+; RV64I-NEXT:    j .LBB205_2
 ; RV64I-NEXT:  .LBB205_1: # %atomicrmw.start
-; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB205_2 Depth=1
 ; RV64I-NEXT:    sd a3, 8(sp)
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a1, s2
@@ -15715,11 +15762,12 @@ define i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 8(sp)
 ; RV64I-NEXT:    bnez a0, .LBB205_4
-; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB205_1 Depth=1
+; RV64I-NEXT:  .LBB205_2: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
 ; RV64I-NEXT:    bge s1, a3, .LBB205_1
-; RV64I-NEXT:  .LBB205_3: # %atomicrmw.start
+; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB205_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB205_1
 ; RV64I-NEXT:  .LBB205_4: # %atomicrmw.end
@@ -15754,9 +15802,9 @@ define i64 @atomicrmw_min_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, sp
-; RV32I-NEXT:    bne a5, s1, .LBB206_3
-; RV32I-NEXT:    j .LBB206_4
+; RV32I-NEXT:    j .LBB206_2
 ; RV32I-NEXT:  .LBB206_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB206_2 Depth=1
 ; RV32I-NEXT:    sw a4, 0(sp)
 ; RV32I-NEXT:    sw a5, 4(sp)
 ; RV32I-NEXT:    addi a4, zero, 2
@@ -15767,19 +15815,23 @@ define i64 @atomicrmw_min_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a5, 4(sp)
 ; RV32I-NEXT:    lw a4, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB206_7
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:  .LBB206_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    beq a5, s1, .LBB206_4
-; RV32I-NEXT:  .LBB206_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB206_2 Depth=1
 ; RV32I-NEXT:    slt a0, s1, a5
 ; RV32I-NEXT:    j .LBB206_5
 ; RV32I-NEXT:  .LBB206_4:
 ; RV32I-NEXT:    sltu a0, s2, a4
 ; RV32I-NEXT:  .LBB206_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB206_2 Depth=1
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
 ; RV32I-NEXT:    bnez a0, .LBB206_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB206_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
 ; RV32I-NEXT:    j .LBB206_1
@@ -15808,9 +15860,9 @@ define i64 @atomicrmw_min_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
 ; RV32IA-NEXT:    mv s3, sp
-; RV32IA-NEXT:    bne a5, s1, .LBB206_3
-; RV32IA-NEXT:    j .LBB206_4
+; RV32IA-NEXT:    j .LBB206_2
 ; RV32IA-NEXT:  .LBB206_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB206_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 0(sp)
 ; RV32IA-NEXT:    sw a5, 4(sp)
 ; RV32IA-NEXT:    addi a4, zero, 2
@@ -15821,19 +15873,23 @@ define i64 @atomicrmw_min_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a5, 4(sp)
 ; RV32IA-NEXT:    lw a4, 0(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB206_7
-; RV32IA-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32IA-NEXT:  .LBB206_2: # %atomicrmw.start
+; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    beq a5, s1, .LBB206_4
-; RV32IA-NEXT:  .LBB206_3: # %atomicrmw.start
+; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB206_2 Depth=1
 ; RV32IA-NEXT:    slt a0, s1, a5
 ; RV32IA-NEXT:    j .LBB206_5
 ; RV32IA-NEXT:  .LBB206_4:
 ; RV32IA-NEXT:    sltu a0, s2, a4
 ; RV32IA-NEXT:  .LBB206_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB206_2 Depth=1
 ; RV32IA-NEXT:    xori a0, a0, 1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
 ; RV32IA-NEXT:    bnez a0, .LBB206_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB206_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
 ; RV32IA-NEXT:    j .LBB206_1
@@ -15859,10 +15915,9 @@ define i64 @atomicrmw_min_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    addi s2, sp, 8
-; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    blt s1, a3, .LBB206_3
+; RV64I-NEXT:    j .LBB206_2
 ; RV64I-NEXT:  .LBB206_1: # %atomicrmw.start
-; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB206_2 Depth=1
 ; RV64I-NEXT:    sd a3, 8(sp)
 ; RV64I-NEXT:    addi a3, zero, 2
 ; RV64I-NEXT:    addi a4, zero, 2
@@ -15871,11 +15926,12 @@ define i64 @atomicrmw_min_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 8(sp)
 ; RV64I-NEXT:    bnez a0, .LBB206_4
-; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB206_1 Depth=1
+; RV64I-NEXT:  .LBB206_2: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
 ; RV64I-NEXT:    bge s1, a3, .LBB206_1
-; RV64I-NEXT:  .LBB206_3: # %atomicrmw.start
+; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB206_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB206_1
 ; RV64I-NEXT:  .LBB206_4: # %atomicrmw.end
@@ -15910,9 +15966,9 @@ define i64 @atomicrmw_min_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, sp
-; RV32I-NEXT:    bne a5, s1, .LBB207_3
-; RV32I-NEXT:    j .LBB207_4
+; RV32I-NEXT:    j .LBB207_2
 ; RV32I-NEXT:  .LBB207_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB207_2 Depth=1
 ; RV32I-NEXT:    sw a4, 0(sp)
 ; RV32I-NEXT:    sw a5, 4(sp)
 ; RV32I-NEXT:    addi a4, zero, 3
@@ -15923,19 +15979,23 @@ define i64 @atomicrmw_min_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a5, 4(sp)
 ; RV32I-NEXT:    lw a4, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB207_7
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:  .LBB207_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    beq a5, s1, .LBB207_4
-; RV32I-NEXT:  .LBB207_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB207_2 Depth=1
 ; RV32I-NEXT:    slt a0, s1, a5
 ; RV32I-NEXT:    j .LBB207_5
 ; RV32I-NEXT:  .LBB207_4:
 ; RV32I-NEXT:    sltu a0, s2, a4
 ; RV32I-NEXT:  .LBB207_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB207_2 Depth=1
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
 ; RV32I-NEXT:    bnez a0, .LBB207_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB207_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
 ; RV32I-NEXT:    j .LBB207_1
@@ -15964,9 +16024,9 @@ define i64 @atomicrmw_min_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
 ; RV32IA-NEXT:    mv s3, sp
-; RV32IA-NEXT:    bne a5, s1, .LBB207_3
-; RV32IA-NEXT:    j .LBB207_4
+; RV32IA-NEXT:    j .LBB207_2
 ; RV32IA-NEXT:  .LBB207_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB207_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 0(sp)
 ; RV32IA-NEXT:    sw a5, 4(sp)
 ; RV32IA-NEXT:    addi a4, zero, 3
@@ -15977,19 +16037,23 @@ define i64 @atomicrmw_min_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a5, 4(sp)
 ; RV32IA-NEXT:    lw a4, 0(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB207_7
-; RV32IA-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32IA-NEXT:  .LBB207_2: # %atomicrmw.start
+; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    beq a5, s1, .LBB207_4
-; RV32IA-NEXT:  .LBB207_3: # %atomicrmw.start
+; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB207_2 Depth=1
 ; RV32IA-NEXT:    slt a0, s1, a5
 ; RV32IA-NEXT:    j .LBB207_5
 ; RV32IA-NEXT:  .LBB207_4:
 ; RV32IA-NEXT:    sltu a0, s2, a4
 ; RV32IA-NEXT:  .LBB207_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB207_2 Depth=1
 ; RV32IA-NEXT:    xori a0, a0, 1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
 ; RV32IA-NEXT:    bnez a0, .LBB207_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB207_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
 ; RV32IA-NEXT:    j .LBB207_1
@@ -16015,10 +16079,9 @@ define i64 @atomicrmw_min_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    addi s2, sp, 8
-; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    blt s1, a3, .LBB207_3
+; RV64I-NEXT:    j .LBB207_2
 ; RV64I-NEXT:  .LBB207_1: # %atomicrmw.start
-; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB207_2 Depth=1
 ; RV64I-NEXT:    sd a3, 8(sp)
 ; RV64I-NEXT:    addi a3, zero, 3
 ; RV64I-NEXT:    mv a0, s0
@@ -16027,11 +16090,12 @@ define i64 @atomicrmw_min_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 8(sp)
 ; RV64I-NEXT:    bnez a0, .LBB207_4
-; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB207_1 Depth=1
+; RV64I-NEXT:  .LBB207_2: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
 ; RV64I-NEXT:    bge s1, a3, .LBB207_1
-; RV64I-NEXT:  .LBB207_3: # %atomicrmw.start
+; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB207_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB207_1
 ; RV64I-NEXT:  .LBB207_4: # %atomicrmw.end
@@ -16066,9 +16130,9 @@ define i64 @atomicrmw_min_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, sp
-; RV32I-NEXT:    bne a5, s1, .LBB208_3
-; RV32I-NEXT:    j .LBB208_4
+; RV32I-NEXT:    j .LBB208_2
 ; RV32I-NEXT:  .LBB208_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB208_2 Depth=1
 ; RV32I-NEXT:    sw a4, 0(sp)
 ; RV32I-NEXT:    sw a5, 4(sp)
 ; RV32I-NEXT:    addi a4, zero, 4
@@ -16079,19 +16143,23 @@ define i64 @atomicrmw_min_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a5, 4(sp)
 ; RV32I-NEXT:    lw a4, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB208_7
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:  .LBB208_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    beq a5, s1, .LBB208_4
-; RV32I-NEXT:  .LBB208_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB208_2 Depth=1
 ; RV32I-NEXT:    slt a0, s1, a5
 ; RV32I-NEXT:    j .LBB208_5
 ; RV32I-NEXT:  .LBB208_4:
 ; RV32I-NEXT:    sltu a0, s2, a4
 ; RV32I-NEXT:  .LBB208_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB208_2 Depth=1
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
 ; RV32I-NEXT:    bnez a0, .LBB208_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB208_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
 ; RV32I-NEXT:    j .LBB208_1
@@ -16120,9 +16188,9 @@ define i64 @atomicrmw_min_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
 ; RV32IA-NEXT:    mv s3, sp
-; RV32IA-NEXT:    bne a5, s1, .LBB208_3
-; RV32IA-NEXT:    j .LBB208_4
+; RV32IA-NEXT:    j .LBB208_2
 ; RV32IA-NEXT:  .LBB208_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB208_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 0(sp)
 ; RV32IA-NEXT:    sw a5, 4(sp)
 ; RV32IA-NEXT:    addi a4, zero, 4
@@ -16133,19 +16201,23 @@ define i64 @atomicrmw_min_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a5, 4(sp)
 ; RV32IA-NEXT:    lw a4, 0(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB208_7
-; RV32IA-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32IA-NEXT:  .LBB208_2: # %atomicrmw.start
+; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    beq a5, s1, .LBB208_4
-; RV32IA-NEXT:  .LBB208_3: # %atomicrmw.start
+; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB208_2 Depth=1
 ; RV32IA-NEXT:    slt a0, s1, a5
 ; RV32IA-NEXT:    j .LBB208_5
 ; RV32IA-NEXT:  .LBB208_4:
 ; RV32IA-NEXT:    sltu a0, s2, a4
 ; RV32IA-NEXT:  .LBB208_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB208_2 Depth=1
 ; RV32IA-NEXT:    xori a0, a0, 1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
 ; RV32IA-NEXT:    bnez a0, .LBB208_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB208_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
 ; RV32IA-NEXT:    j .LBB208_1
@@ -16171,10 +16243,9 @@ define i64 @atomicrmw_min_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    addi s2, sp, 8
-; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    blt s1, a3, .LBB208_3
+; RV64I-NEXT:    j .LBB208_2
 ; RV64I-NEXT:  .LBB208_1: # %atomicrmw.start
-; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB208_2 Depth=1
 ; RV64I-NEXT:    sd a3, 8(sp)
 ; RV64I-NEXT:    addi a3, zero, 4
 ; RV64I-NEXT:    addi a4, zero, 2
@@ -16183,11 +16254,12 @@ define i64 @atomicrmw_min_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 8(sp)
 ; RV64I-NEXT:    bnez a0, .LBB208_4
-; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB208_1 Depth=1
+; RV64I-NEXT:  .LBB208_2: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
 ; RV64I-NEXT:    bge s1, a3, .LBB208_1
-; RV64I-NEXT:  .LBB208_3: # %atomicrmw.start
+; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB208_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB208_1
 ; RV64I-NEXT:  .LBB208_4: # %atomicrmw.end
@@ -16222,9 +16294,9 @@ define i64 @atomicrmw_min_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, sp
-; RV32I-NEXT:    bne a5, s1, .LBB209_3
-; RV32I-NEXT:    j .LBB209_4
+; RV32I-NEXT:    j .LBB209_2
 ; RV32I-NEXT:  .LBB209_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB209_2 Depth=1
 ; RV32I-NEXT:    sw a4, 0(sp)
 ; RV32I-NEXT:    sw a5, 4(sp)
 ; RV32I-NEXT:    addi a4, zero, 5
@@ -16235,19 +16307,23 @@ define i64 @atomicrmw_min_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a5, 4(sp)
 ; RV32I-NEXT:    lw a4, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB209_7
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:  .LBB209_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    beq a5, s1, .LBB209_4
-; RV32I-NEXT:  .LBB209_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB209_2 Depth=1
 ; RV32I-NEXT:    slt a0, s1, a5
 ; RV32I-NEXT:    j .LBB209_5
 ; RV32I-NEXT:  .LBB209_4:
 ; RV32I-NEXT:    sltu a0, s2, a4
 ; RV32I-NEXT:  .LBB209_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB209_2 Depth=1
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
 ; RV32I-NEXT:    bnez a0, .LBB209_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB209_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
 ; RV32I-NEXT:    j .LBB209_1
@@ -16276,9 +16352,9 @@ define i64 @atomicrmw_min_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
 ; RV32IA-NEXT:    mv s3, sp
-; RV32IA-NEXT:    bne a5, s1, .LBB209_3
-; RV32IA-NEXT:    j .LBB209_4
+; RV32IA-NEXT:    j .LBB209_2
 ; RV32IA-NEXT:  .LBB209_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB209_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 0(sp)
 ; RV32IA-NEXT:    sw a5, 4(sp)
 ; RV32IA-NEXT:    addi a4, zero, 5
@@ -16289,19 +16365,23 @@ define i64 @atomicrmw_min_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a5, 4(sp)
 ; RV32IA-NEXT:    lw a4, 0(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB209_7
-; RV32IA-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32IA-NEXT:  .LBB209_2: # %atomicrmw.start
+; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    beq a5, s1, .LBB209_4
-; RV32IA-NEXT:  .LBB209_3: # %atomicrmw.start
+; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB209_2 Depth=1
 ; RV32IA-NEXT:    slt a0, s1, a5
 ; RV32IA-NEXT:    j .LBB209_5
 ; RV32IA-NEXT:  .LBB209_4:
 ; RV32IA-NEXT:    sltu a0, s2, a4
 ; RV32IA-NEXT:  .LBB209_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB209_2 Depth=1
 ; RV32IA-NEXT:    xori a0, a0, 1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
 ; RV32IA-NEXT:    bnez a0, .LBB209_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB209_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
 ; RV32IA-NEXT:    j .LBB209_1
@@ -16327,10 +16407,9 @@ define i64 @atomicrmw_min_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    addi s2, sp, 8
-; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    blt s1, a3, .LBB209_3
+; RV64I-NEXT:    j .LBB209_2
 ; RV64I-NEXT:  .LBB209_1: # %atomicrmw.start
-; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB209_2 Depth=1
 ; RV64I-NEXT:    sd a3, 8(sp)
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    addi a4, zero, 5
@@ -16339,11 +16418,12 @@ define i64 @atomicrmw_min_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 8(sp)
 ; RV64I-NEXT:    bnez a0, .LBB209_4
-; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB209_1 Depth=1
+; RV64I-NEXT:  .LBB209_2: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
 ; RV64I-NEXT:    bge s1, a3, .LBB209_1
-; RV64I-NEXT:  .LBB209_3: # %atomicrmw.start
+; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB209_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB209_1
 ; RV64I-NEXT:  .LBB209_4: # %atomicrmw.end
@@ -16378,9 +16458,9 @@ define i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, sp
-; RV32I-NEXT:    bne a5, s1, .LBB210_3
-; RV32I-NEXT:    j .LBB210_4
+; RV32I-NEXT:    j .LBB210_2
 ; RV32I-NEXT:  .LBB210_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB210_2 Depth=1
 ; RV32I-NEXT:    sw a4, 0(sp)
 ; RV32I-NEXT:    sw a5, 4(sp)
 ; RV32I-NEXT:    mv a0, s0
@@ -16391,18 +16471,22 @@ define i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a5, 4(sp)
 ; RV32I-NEXT:    lw a4, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB210_7
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:  .LBB210_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    beq a5, s1, .LBB210_4
-; RV32I-NEXT:  .LBB210_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB210_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s1, a5
 ; RV32I-NEXT:    j .LBB210_5
 ; RV32I-NEXT:  .LBB210_4:
 ; RV32I-NEXT:    sltu a0, s2, a4
 ; RV32I-NEXT:  .LBB210_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB210_2 Depth=1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
 ; RV32I-NEXT:    bnez a0, .LBB210_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB210_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
 ; RV32I-NEXT:    j .LBB210_1
@@ -16431,9 +16515,9 @@ define i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
 ; RV32IA-NEXT:    mv s3, sp
-; RV32IA-NEXT:    bne a5, s1, .LBB210_3
-; RV32IA-NEXT:    j .LBB210_4
+; RV32IA-NEXT:    j .LBB210_2
 ; RV32IA-NEXT:  .LBB210_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB210_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 0(sp)
 ; RV32IA-NEXT:    sw a5, 4(sp)
 ; RV32IA-NEXT:    mv a0, s0
@@ -16444,18 +16528,22 @@ define i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a5, 4(sp)
 ; RV32IA-NEXT:    lw a4, 0(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB210_7
-; RV32IA-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32IA-NEXT:  .LBB210_2: # %atomicrmw.start
+; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    beq a5, s1, .LBB210_4
-; RV32IA-NEXT:  .LBB210_3: # %atomicrmw.start
+; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB210_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s1, a5
 ; RV32IA-NEXT:    j .LBB210_5
 ; RV32IA-NEXT:  .LBB210_4:
 ; RV32IA-NEXT:    sltu a0, s2, a4
 ; RV32IA-NEXT:  .LBB210_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB210_2 Depth=1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
 ; RV32IA-NEXT:    bnez a0, .LBB210_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB210_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
 ; RV32IA-NEXT:    j .LBB210_1
@@ -16481,10 +16569,9 @@ define i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    addi s2, sp, 8
-; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bgeu s1, a3, .LBB210_3
+; RV64I-NEXT:    j .LBB210_2
 ; RV64I-NEXT:  .LBB210_1: # %atomicrmw.start
-; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB210_2 Depth=1
 ; RV64I-NEXT:    sd a3, 8(sp)
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a1, s2
@@ -16493,11 +16580,12 @@ define i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 8(sp)
 ; RV64I-NEXT:    bnez a0, .LBB210_4
-; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB210_1 Depth=1
+; RV64I-NEXT:  .LBB210_2: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
 ; RV64I-NEXT:    bltu s1, a3, .LBB210_1
-; RV64I-NEXT:  .LBB210_3: # %atomicrmw.start
+; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB210_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB210_1
 ; RV64I-NEXT:  .LBB210_4: # %atomicrmw.end
@@ -16532,9 +16620,9 @@ define i64 @atomicrmw_umax_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, sp
-; RV32I-NEXT:    bne a5, s1, .LBB211_3
-; RV32I-NEXT:    j .LBB211_4
+; RV32I-NEXT:    j .LBB211_2
 ; RV32I-NEXT:  .LBB211_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB211_2 Depth=1
 ; RV32I-NEXT:    sw a4, 0(sp)
 ; RV32I-NEXT:    sw a5, 4(sp)
 ; RV32I-NEXT:    addi a4, zero, 2
@@ -16545,18 +16633,22 @@ define i64 @atomicrmw_umax_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a5, 4(sp)
 ; RV32I-NEXT:    lw a4, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB211_7
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:  .LBB211_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    beq a5, s1, .LBB211_4
-; RV32I-NEXT:  .LBB211_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB211_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s1, a5
 ; RV32I-NEXT:    j .LBB211_5
 ; RV32I-NEXT:  .LBB211_4:
 ; RV32I-NEXT:    sltu a0, s2, a4
 ; RV32I-NEXT:  .LBB211_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB211_2 Depth=1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
 ; RV32I-NEXT:    bnez a0, .LBB211_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB211_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
 ; RV32I-NEXT:    j .LBB211_1
@@ -16585,9 +16677,9 @@ define i64 @atomicrmw_umax_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
 ; RV32IA-NEXT:    mv s3, sp
-; RV32IA-NEXT:    bne a5, s1, .LBB211_3
-; RV32IA-NEXT:    j .LBB211_4
+; RV32IA-NEXT:    j .LBB211_2
 ; RV32IA-NEXT:  .LBB211_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB211_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 0(sp)
 ; RV32IA-NEXT:    sw a5, 4(sp)
 ; RV32IA-NEXT:    addi a4, zero, 2
@@ -16598,18 +16690,22 @@ define i64 @atomicrmw_umax_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a5, 4(sp)
 ; RV32IA-NEXT:    lw a4, 0(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB211_7
-; RV32IA-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32IA-NEXT:  .LBB211_2: # %atomicrmw.start
+; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    beq a5, s1, .LBB211_4
-; RV32IA-NEXT:  .LBB211_3: # %atomicrmw.start
+; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB211_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s1, a5
 ; RV32IA-NEXT:    j .LBB211_5
 ; RV32IA-NEXT:  .LBB211_4:
 ; RV32IA-NEXT:    sltu a0, s2, a4
 ; RV32IA-NEXT:  .LBB211_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB211_2 Depth=1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
 ; RV32IA-NEXT:    bnez a0, .LBB211_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB211_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
 ; RV32IA-NEXT:    j .LBB211_1
@@ -16635,10 +16731,9 @@ define i64 @atomicrmw_umax_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    addi s2, sp, 8
-; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bgeu s1, a3, .LBB211_3
+; RV64I-NEXT:    j .LBB211_2
 ; RV64I-NEXT:  .LBB211_1: # %atomicrmw.start
-; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB211_2 Depth=1
 ; RV64I-NEXT:    sd a3, 8(sp)
 ; RV64I-NEXT:    addi a3, zero, 2
 ; RV64I-NEXT:    addi a4, zero, 2
@@ -16647,11 +16742,12 @@ define i64 @atomicrmw_umax_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 8(sp)
 ; RV64I-NEXT:    bnez a0, .LBB211_4
-; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB211_1 Depth=1
+; RV64I-NEXT:  .LBB211_2: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
 ; RV64I-NEXT:    bltu s1, a3, .LBB211_1
-; RV64I-NEXT:  .LBB211_3: # %atomicrmw.start
+; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB211_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB211_1
 ; RV64I-NEXT:  .LBB211_4: # %atomicrmw.end
@@ -16686,9 +16782,9 @@ define i64 @atomicrmw_umax_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, sp
-; RV32I-NEXT:    bne a5, s1, .LBB212_3
-; RV32I-NEXT:    j .LBB212_4
+; RV32I-NEXT:    j .LBB212_2
 ; RV32I-NEXT:  .LBB212_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB212_2 Depth=1
 ; RV32I-NEXT:    sw a4, 0(sp)
 ; RV32I-NEXT:    sw a5, 4(sp)
 ; RV32I-NEXT:    addi a4, zero, 3
@@ -16699,18 +16795,22 @@ define i64 @atomicrmw_umax_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a5, 4(sp)
 ; RV32I-NEXT:    lw a4, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB212_7
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:  .LBB212_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    beq a5, s1, .LBB212_4
-; RV32I-NEXT:  .LBB212_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB212_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s1, a5
 ; RV32I-NEXT:    j .LBB212_5
 ; RV32I-NEXT:  .LBB212_4:
 ; RV32I-NEXT:    sltu a0, s2, a4
 ; RV32I-NEXT:  .LBB212_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB212_2 Depth=1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
 ; RV32I-NEXT:    bnez a0, .LBB212_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB212_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
 ; RV32I-NEXT:    j .LBB212_1
@@ -16739,9 +16839,9 @@ define i64 @atomicrmw_umax_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
 ; RV32IA-NEXT:    mv s3, sp
-; RV32IA-NEXT:    bne a5, s1, .LBB212_3
-; RV32IA-NEXT:    j .LBB212_4
+; RV32IA-NEXT:    j .LBB212_2
 ; RV32IA-NEXT:  .LBB212_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB212_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 0(sp)
 ; RV32IA-NEXT:    sw a5, 4(sp)
 ; RV32IA-NEXT:    addi a4, zero, 3
@@ -16752,18 +16852,22 @@ define i64 @atomicrmw_umax_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a5, 4(sp)
 ; RV32IA-NEXT:    lw a4, 0(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB212_7
-; RV32IA-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32IA-NEXT:  .LBB212_2: # %atomicrmw.start
+; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    beq a5, s1, .LBB212_4
-; RV32IA-NEXT:  .LBB212_3: # %atomicrmw.start
+; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB212_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s1, a5
 ; RV32IA-NEXT:    j .LBB212_5
 ; RV32IA-NEXT:  .LBB212_4:
 ; RV32IA-NEXT:    sltu a0, s2, a4
 ; RV32IA-NEXT:  .LBB212_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB212_2 Depth=1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
 ; RV32IA-NEXT:    bnez a0, .LBB212_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB212_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
 ; RV32IA-NEXT:    j .LBB212_1
@@ -16789,10 +16893,9 @@ define i64 @atomicrmw_umax_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    addi s2, sp, 8
-; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bgeu s1, a3, .LBB212_3
+; RV64I-NEXT:    j .LBB212_2
 ; RV64I-NEXT:  .LBB212_1: # %atomicrmw.start
-; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB212_2 Depth=1
 ; RV64I-NEXT:    sd a3, 8(sp)
 ; RV64I-NEXT:    addi a3, zero, 3
 ; RV64I-NEXT:    mv a0, s0
@@ -16801,11 +16904,12 @@ define i64 @atomicrmw_umax_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 8(sp)
 ; RV64I-NEXT:    bnez a0, .LBB212_4
-; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB212_1 Depth=1
+; RV64I-NEXT:  .LBB212_2: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
 ; RV64I-NEXT:    bltu s1, a3, .LBB212_1
-; RV64I-NEXT:  .LBB212_3: # %atomicrmw.start
+; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB212_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB212_1
 ; RV64I-NEXT:  .LBB212_4: # %atomicrmw.end
@@ -16840,9 +16944,9 @@ define i64 @atomicrmw_umax_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, sp
-; RV32I-NEXT:    bne a5, s1, .LBB213_3
-; RV32I-NEXT:    j .LBB213_4
+; RV32I-NEXT:    j .LBB213_2
 ; RV32I-NEXT:  .LBB213_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB213_2 Depth=1
 ; RV32I-NEXT:    sw a4, 0(sp)
 ; RV32I-NEXT:    sw a5, 4(sp)
 ; RV32I-NEXT:    addi a4, zero, 4
@@ -16853,18 +16957,22 @@ define i64 @atomicrmw_umax_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a5, 4(sp)
 ; RV32I-NEXT:    lw a4, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB213_7
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:  .LBB213_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    beq a5, s1, .LBB213_4
-; RV32I-NEXT:  .LBB213_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB213_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s1, a5
 ; RV32I-NEXT:    j .LBB213_5
 ; RV32I-NEXT:  .LBB213_4:
 ; RV32I-NEXT:    sltu a0, s2, a4
 ; RV32I-NEXT:  .LBB213_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB213_2 Depth=1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
 ; RV32I-NEXT:    bnez a0, .LBB213_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB213_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
 ; RV32I-NEXT:    j .LBB213_1
@@ -16893,9 +17001,9 @@ define i64 @atomicrmw_umax_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
 ; RV32IA-NEXT:    mv s3, sp
-; RV32IA-NEXT:    bne a5, s1, .LBB213_3
-; RV32IA-NEXT:    j .LBB213_4
+; RV32IA-NEXT:    j .LBB213_2
 ; RV32IA-NEXT:  .LBB213_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB213_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 0(sp)
 ; RV32IA-NEXT:    sw a5, 4(sp)
 ; RV32IA-NEXT:    addi a4, zero, 4
@@ -16906,18 +17014,22 @@ define i64 @atomicrmw_umax_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a5, 4(sp)
 ; RV32IA-NEXT:    lw a4, 0(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB213_7
-; RV32IA-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32IA-NEXT:  .LBB213_2: # %atomicrmw.start
+; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    beq a5, s1, .LBB213_4
-; RV32IA-NEXT:  .LBB213_3: # %atomicrmw.start
+; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB213_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s1, a5
 ; RV32IA-NEXT:    j .LBB213_5
 ; RV32IA-NEXT:  .LBB213_4:
 ; RV32IA-NEXT:    sltu a0, s2, a4
 ; RV32IA-NEXT:  .LBB213_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB213_2 Depth=1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
 ; RV32IA-NEXT:    bnez a0, .LBB213_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB213_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
 ; RV32IA-NEXT:    j .LBB213_1
@@ -16943,10 +17055,9 @@ define i64 @atomicrmw_umax_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    addi s2, sp, 8
-; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bgeu s1, a3, .LBB213_3
+; RV64I-NEXT:    j .LBB213_2
 ; RV64I-NEXT:  .LBB213_1: # %atomicrmw.start
-; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB213_2 Depth=1
 ; RV64I-NEXT:    sd a3, 8(sp)
 ; RV64I-NEXT:    addi a3, zero, 4
 ; RV64I-NEXT:    addi a4, zero, 2
@@ -16955,11 +17066,12 @@ define i64 @atomicrmw_umax_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 8(sp)
 ; RV64I-NEXT:    bnez a0, .LBB213_4
-; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB213_1 Depth=1
+; RV64I-NEXT:  .LBB213_2: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
 ; RV64I-NEXT:    bltu s1, a3, .LBB213_1
-; RV64I-NEXT:  .LBB213_3: # %atomicrmw.start
+; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB213_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB213_1
 ; RV64I-NEXT:  .LBB213_4: # %atomicrmw.end
@@ -16994,9 +17106,9 @@ define i64 @atomicrmw_umax_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, sp
-; RV32I-NEXT:    bne a5, s1, .LBB214_3
-; RV32I-NEXT:    j .LBB214_4
+; RV32I-NEXT:    j .LBB214_2
 ; RV32I-NEXT:  .LBB214_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB214_2 Depth=1
 ; RV32I-NEXT:    sw a4, 0(sp)
 ; RV32I-NEXT:    sw a5, 4(sp)
 ; RV32I-NEXT:    addi a4, zero, 5
@@ -17007,18 +17119,22 @@ define i64 @atomicrmw_umax_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a5, 4(sp)
 ; RV32I-NEXT:    lw a4, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB214_7
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:  .LBB214_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    beq a5, s1, .LBB214_4
-; RV32I-NEXT:  .LBB214_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB214_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s1, a5
 ; RV32I-NEXT:    j .LBB214_5
 ; RV32I-NEXT:  .LBB214_4:
 ; RV32I-NEXT:    sltu a0, s2, a4
 ; RV32I-NEXT:  .LBB214_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB214_2 Depth=1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
 ; RV32I-NEXT:    bnez a0, .LBB214_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB214_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
 ; RV32I-NEXT:    j .LBB214_1
@@ -17047,9 +17163,9 @@ define i64 @atomicrmw_umax_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
 ; RV32IA-NEXT:    mv s3, sp
-; RV32IA-NEXT:    bne a5, s1, .LBB214_3
-; RV32IA-NEXT:    j .LBB214_4
+; RV32IA-NEXT:    j .LBB214_2
 ; RV32IA-NEXT:  .LBB214_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB214_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 0(sp)
 ; RV32IA-NEXT:    sw a5, 4(sp)
 ; RV32IA-NEXT:    addi a4, zero, 5
@@ -17060,18 +17176,22 @@ define i64 @atomicrmw_umax_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a5, 4(sp)
 ; RV32IA-NEXT:    lw a4, 0(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB214_7
-; RV32IA-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32IA-NEXT:  .LBB214_2: # %atomicrmw.start
+; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    beq a5, s1, .LBB214_4
-; RV32IA-NEXT:  .LBB214_3: # %atomicrmw.start
+; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB214_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s1, a5
 ; RV32IA-NEXT:    j .LBB214_5
 ; RV32IA-NEXT:  .LBB214_4:
 ; RV32IA-NEXT:    sltu a0, s2, a4
 ; RV32IA-NEXT:  .LBB214_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB214_2 Depth=1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
 ; RV32IA-NEXT:    bnez a0, .LBB214_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB214_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
 ; RV32IA-NEXT:    j .LBB214_1
@@ -17097,10 +17217,9 @@ define i64 @atomicrmw_umax_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    addi s2, sp, 8
-; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bgeu s1, a3, .LBB214_3
+; RV64I-NEXT:    j .LBB214_2
 ; RV64I-NEXT:  .LBB214_1: # %atomicrmw.start
-; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB214_2 Depth=1
 ; RV64I-NEXT:    sd a3, 8(sp)
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    addi a4, zero, 5
@@ -17109,11 +17228,12 @@ define i64 @atomicrmw_umax_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 8(sp)
 ; RV64I-NEXT:    bnez a0, .LBB214_4
-; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB214_1 Depth=1
+; RV64I-NEXT:  .LBB214_2: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
 ; RV64I-NEXT:    bltu s1, a3, .LBB214_1
-; RV64I-NEXT:  .LBB214_3: # %atomicrmw.start
+; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB214_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB214_1
 ; RV64I-NEXT:  .LBB214_4: # %atomicrmw.end
@@ -17148,9 +17268,9 @@ define i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, sp
-; RV32I-NEXT:    bne a5, s1, .LBB215_3
-; RV32I-NEXT:    j .LBB215_4
+; RV32I-NEXT:    j .LBB215_2
 ; RV32I-NEXT:  .LBB215_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB215_2 Depth=1
 ; RV32I-NEXT:    sw a4, 0(sp)
 ; RV32I-NEXT:    sw a5, 4(sp)
 ; RV32I-NEXT:    mv a0, s0
@@ -17161,19 +17281,23 @@ define i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a5, 4(sp)
 ; RV32I-NEXT:    lw a4, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB215_7
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:  .LBB215_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    beq a5, s1, .LBB215_4
-; RV32I-NEXT:  .LBB215_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB215_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s1, a5
 ; RV32I-NEXT:    j .LBB215_5
 ; RV32I-NEXT:  .LBB215_4:
 ; RV32I-NEXT:    sltu a0, s2, a4
 ; RV32I-NEXT:  .LBB215_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB215_2 Depth=1
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
 ; RV32I-NEXT:    bnez a0, .LBB215_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB215_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
 ; RV32I-NEXT:    j .LBB215_1
@@ -17202,9 +17326,9 @@ define i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
 ; RV32IA-NEXT:    mv s3, sp
-; RV32IA-NEXT:    bne a5, s1, .LBB215_3
-; RV32IA-NEXT:    j .LBB215_4
+; RV32IA-NEXT:    j .LBB215_2
 ; RV32IA-NEXT:  .LBB215_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB215_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 0(sp)
 ; RV32IA-NEXT:    sw a5, 4(sp)
 ; RV32IA-NEXT:    mv a0, s0
@@ -17215,19 +17339,23 @@ define i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a5, 4(sp)
 ; RV32IA-NEXT:    lw a4, 0(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB215_7
-; RV32IA-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32IA-NEXT:  .LBB215_2: # %atomicrmw.start
+; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    beq a5, s1, .LBB215_4
-; RV32IA-NEXT:  .LBB215_3: # %atomicrmw.start
+; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB215_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s1, a5
 ; RV32IA-NEXT:    j .LBB215_5
 ; RV32IA-NEXT:  .LBB215_4:
 ; RV32IA-NEXT:    sltu a0, s2, a4
 ; RV32IA-NEXT:  .LBB215_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB215_2 Depth=1
 ; RV32IA-NEXT:    xori a0, a0, 1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
 ; RV32IA-NEXT:    bnez a0, .LBB215_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB215_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
 ; RV32IA-NEXT:    j .LBB215_1
@@ -17253,10 +17381,9 @@ define i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    addi s2, sp, 8
-; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bltu s1, a3, .LBB215_3
+; RV64I-NEXT:    j .LBB215_2
 ; RV64I-NEXT:  .LBB215_1: # %atomicrmw.start
-; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB215_2 Depth=1
 ; RV64I-NEXT:    sd a3, 8(sp)
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a1, s2
@@ -17265,11 +17392,12 @@ define i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 8(sp)
 ; RV64I-NEXT:    bnez a0, .LBB215_4
-; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB215_1 Depth=1
+; RV64I-NEXT:  .LBB215_2: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
 ; RV64I-NEXT:    bgeu s1, a3, .LBB215_1
-; RV64I-NEXT:  .LBB215_3: # %atomicrmw.start
+; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB215_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB215_1
 ; RV64I-NEXT:  .LBB215_4: # %atomicrmw.end
@@ -17304,9 +17432,9 @@ define i64 @atomicrmw_umin_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, sp
-; RV32I-NEXT:    bne a5, s1, .LBB216_3
-; RV32I-NEXT:    j .LBB216_4
+; RV32I-NEXT:    j .LBB216_2
 ; RV32I-NEXT:  .LBB216_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB216_2 Depth=1
 ; RV32I-NEXT:    sw a4, 0(sp)
 ; RV32I-NEXT:    sw a5, 4(sp)
 ; RV32I-NEXT:    addi a4, zero, 2
@@ -17317,19 +17445,23 @@ define i64 @atomicrmw_umin_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a5, 4(sp)
 ; RV32I-NEXT:    lw a4, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB216_7
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:  .LBB216_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    beq a5, s1, .LBB216_4
-; RV32I-NEXT:  .LBB216_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB216_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s1, a5
 ; RV32I-NEXT:    j .LBB216_5
 ; RV32I-NEXT:  .LBB216_4:
 ; RV32I-NEXT:    sltu a0, s2, a4
 ; RV32I-NEXT:  .LBB216_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB216_2 Depth=1
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
 ; RV32I-NEXT:    bnez a0, .LBB216_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB216_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
 ; RV32I-NEXT:    j .LBB216_1
@@ -17358,9 +17490,9 @@ define i64 @atomicrmw_umin_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
 ; RV32IA-NEXT:    mv s3, sp
-; RV32IA-NEXT:    bne a5, s1, .LBB216_3
-; RV32IA-NEXT:    j .LBB216_4
+; RV32IA-NEXT:    j .LBB216_2
 ; RV32IA-NEXT:  .LBB216_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB216_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 0(sp)
 ; RV32IA-NEXT:    sw a5, 4(sp)
 ; RV32IA-NEXT:    addi a4, zero, 2
@@ -17371,19 +17503,23 @@ define i64 @atomicrmw_umin_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a5, 4(sp)
 ; RV32IA-NEXT:    lw a4, 0(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB216_7
-; RV32IA-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32IA-NEXT:  .LBB216_2: # %atomicrmw.start
+; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    beq a5, s1, .LBB216_4
-; RV32IA-NEXT:  .LBB216_3: # %atomicrmw.start
+; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB216_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s1, a5
 ; RV32IA-NEXT:    j .LBB216_5
 ; RV32IA-NEXT:  .LBB216_4:
 ; RV32IA-NEXT:    sltu a0, s2, a4
 ; RV32IA-NEXT:  .LBB216_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB216_2 Depth=1
 ; RV32IA-NEXT:    xori a0, a0, 1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
 ; RV32IA-NEXT:    bnez a0, .LBB216_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB216_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
 ; RV32IA-NEXT:    j .LBB216_1
@@ -17409,10 +17545,9 @@ define i64 @atomicrmw_umin_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    addi s2, sp, 8
-; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bltu s1, a3, .LBB216_3
+; RV64I-NEXT:    j .LBB216_2
 ; RV64I-NEXT:  .LBB216_1: # %atomicrmw.start
-; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB216_2 Depth=1
 ; RV64I-NEXT:    sd a3, 8(sp)
 ; RV64I-NEXT:    addi a3, zero, 2
 ; RV64I-NEXT:    addi a4, zero, 2
@@ -17421,11 +17556,12 @@ define i64 @atomicrmw_umin_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 8(sp)
 ; RV64I-NEXT:    bnez a0, .LBB216_4
-; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB216_1 Depth=1
+; RV64I-NEXT:  .LBB216_2: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
 ; RV64I-NEXT:    bgeu s1, a3, .LBB216_1
-; RV64I-NEXT:  .LBB216_3: # %atomicrmw.start
+; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB216_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB216_1
 ; RV64I-NEXT:  .LBB216_4: # %atomicrmw.end
@@ -17460,9 +17596,9 @@ define i64 @atomicrmw_umin_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, sp
-; RV32I-NEXT:    bne a5, s1, .LBB217_3
-; RV32I-NEXT:    j .LBB217_4
+; RV32I-NEXT:    j .LBB217_2
 ; RV32I-NEXT:  .LBB217_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB217_2 Depth=1
 ; RV32I-NEXT:    sw a4, 0(sp)
 ; RV32I-NEXT:    sw a5, 4(sp)
 ; RV32I-NEXT:    addi a4, zero, 3
@@ -17473,19 +17609,23 @@ define i64 @atomicrmw_umin_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a5, 4(sp)
 ; RV32I-NEXT:    lw a4, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB217_7
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:  .LBB217_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    beq a5, s1, .LBB217_4
-; RV32I-NEXT:  .LBB217_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB217_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s1, a5
 ; RV32I-NEXT:    j .LBB217_5
 ; RV32I-NEXT:  .LBB217_4:
 ; RV32I-NEXT:    sltu a0, s2, a4
 ; RV32I-NEXT:  .LBB217_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB217_2 Depth=1
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
 ; RV32I-NEXT:    bnez a0, .LBB217_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB217_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
 ; RV32I-NEXT:    j .LBB217_1
@@ -17514,9 +17654,9 @@ define i64 @atomicrmw_umin_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
 ; RV32IA-NEXT:    mv s3, sp
-; RV32IA-NEXT:    bne a5, s1, .LBB217_3
-; RV32IA-NEXT:    j .LBB217_4
+; RV32IA-NEXT:    j .LBB217_2
 ; RV32IA-NEXT:  .LBB217_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB217_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 0(sp)
 ; RV32IA-NEXT:    sw a5, 4(sp)
 ; RV32IA-NEXT:    addi a4, zero, 3
@@ -17527,19 +17667,23 @@ define i64 @atomicrmw_umin_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a5, 4(sp)
 ; RV32IA-NEXT:    lw a4, 0(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB217_7
-; RV32IA-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32IA-NEXT:  .LBB217_2: # %atomicrmw.start
+; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    beq a5, s1, .LBB217_4
-; RV32IA-NEXT:  .LBB217_3: # %atomicrmw.start
+; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB217_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s1, a5
 ; RV32IA-NEXT:    j .LBB217_5
 ; RV32IA-NEXT:  .LBB217_4:
 ; RV32IA-NEXT:    sltu a0, s2, a4
 ; RV32IA-NEXT:  .LBB217_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB217_2 Depth=1
 ; RV32IA-NEXT:    xori a0, a0, 1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
 ; RV32IA-NEXT:    bnez a0, .LBB217_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB217_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
 ; RV32IA-NEXT:    j .LBB217_1
@@ -17565,10 +17709,9 @@ define i64 @atomicrmw_umin_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    addi s2, sp, 8
-; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bltu s1, a3, .LBB217_3
+; RV64I-NEXT:    j .LBB217_2
 ; RV64I-NEXT:  .LBB217_1: # %atomicrmw.start
-; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB217_2 Depth=1
 ; RV64I-NEXT:    sd a3, 8(sp)
 ; RV64I-NEXT:    addi a3, zero, 3
 ; RV64I-NEXT:    mv a0, s0
@@ -17577,11 +17720,12 @@ define i64 @atomicrmw_umin_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 8(sp)
 ; RV64I-NEXT:    bnez a0, .LBB217_4
-; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB217_1 Depth=1
+; RV64I-NEXT:  .LBB217_2: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
 ; RV64I-NEXT:    bgeu s1, a3, .LBB217_1
-; RV64I-NEXT:  .LBB217_3: # %atomicrmw.start
+; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB217_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB217_1
 ; RV64I-NEXT:  .LBB217_4: # %atomicrmw.end
@@ -17616,9 +17760,9 @@ define i64 @atomicrmw_umin_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, sp
-; RV32I-NEXT:    bne a5, s1, .LBB218_3
-; RV32I-NEXT:    j .LBB218_4
+; RV32I-NEXT:    j .LBB218_2
 ; RV32I-NEXT:  .LBB218_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB218_2 Depth=1
 ; RV32I-NEXT:    sw a4, 0(sp)
 ; RV32I-NEXT:    sw a5, 4(sp)
 ; RV32I-NEXT:    addi a4, zero, 4
@@ -17629,19 +17773,23 @@ define i64 @atomicrmw_umin_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a5, 4(sp)
 ; RV32I-NEXT:    lw a4, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB218_7
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:  .LBB218_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    beq a5, s1, .LBB218_4
-; RV32I-NEXT:  .LBB218_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB218_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s1, a5
 ; RV32I-NEXT:    j .LBB218_5
 ; RV32I-NEXT:  .LBB218_4:
 ; RV32I-NEXT:    sltu a0, s2, a4
 ; RV32I-NEXT:  .LBB218_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB218_2 Depth=1
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
 ; RV32I-NEXT:    bnez a0, .LBB218_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB218_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
 ; RV32I-NEXT:    j .LBB218_1
@@ -17670,9 +17818,9 @@ define i64 @atomicrmw_umin_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
 ; RV32IA-NEXT:    mv s3, sp
-; RV32IA-NEXT:    bne a5, s1, .LBB218_3
-; RV32IA-NEXT:    j .LBB218_4
+; RV32IA-NEXT:    j .LBB218_2
 ; RV32IA-NEXT:  .LBB218_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB218_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 0(sp)
 ; RV32IA-NEXT:    sw a5, 4(sp)
 ; RV32IA-NEXT:    addi a4, zero, 4
@@ -17683,19 +17831,23 @@ define i64 @atomicrmw_umin_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a5, 4(sp)
 ; RV32IA-NEXT:    lw a4, 0(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB218_7
-; RV32IA-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32IA-NEXT:  .LBB218_2: # %atomicrmw.start
+; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    beq a5, s1, .LBB218_4
-; RV32IA-NEXT:  .LBB218_3: # %atomicrmw.start
+; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB218_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s1, a5
 ; RV32IA-NEXT:    j .LBB218_5
 ; RV32IA-NEXT:  .LBB218_4:
 ; RV32IA-NEXT:    sltu a0, s2, a4
 ; RV32IA-NEXT:  .LBB218_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB218_2 Depth=1
 ; RV32IA-NEXT:    xori a0, a0, 1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
 ; RV32IA-NEXT:    bnez a0, .LBB218_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB218_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
 ; RV32IA-NEXT:    j .LBB218_1
@@ -17721,10 +17873,9 @@ define i64 @atomicrmw_umin_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    addi s2, sp, 8
-; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bltu s1, a3, .LBB218_3
+; RV64I-NEXT:    j .LBB218_2
 ; RV64I-NEXT:  .LBB218_1: # %atomicrmw.start
-; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB218_2 Depth=1
 ; RV64I-NEXT:    sd a3, 8(sp)
 ; RV64I-NEXT:    addi a3, zero, 4
 ; RV64I-NEXT:    addi a4, zero, 2
@@ -17733,11 +17884,12 @@ define i64 @atomicrmw_umin_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 8(sp)
 ; RV64I-NEXT:    bnez a0, .LBB218_4
-; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB218_1 Depth=1
+; RV64I-NEXT:  .LBB218_2: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
 ; RV64I-NEXT:    bgeu s1, a3, .LBB218_1
-; RV64I-NEXT:  .LBB218_3: # %atomicrmw.start
+; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB218_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB218_1
 ; RV64I-NEXT:  .LBB218_4: # %atomicrmw.end
@@ -17772,9 +17924,9 @@ define i64 @atomicrmw_umin_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv s1, a2
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, sp
-; RV32I-NEXT:    bne a5, s1, .LBB219_3
-; RV32I-NEXT:    j .LBB219_4
+; RV32I-NEXT:    j .LBB219_2
 ; RV32I-NEXT:  .LBB219_1: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB219_2 Depth=1
 ; RV32I-NEXT:    sw a4, 0(sp)
 ; RV32I-NEXT:    sw a5, 4(sp)
 ; RV32I-NEXT:    addi a4, zero, 5
@@ -17785,19 +17937,23 @@ define i64 @atomicrmw_umin_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    lw a5, 4(sp)
 ; RV32I-NEXT:    lw a4, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB219_7
-; RV32I-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32I-NEXT:  .LBB219_2: # %atomicrmw.start
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    beq a5, s1, .LBB219_4
-; RV32I-NEXT:  .LBB219_3: # %atomicrmw.start
+; RV32I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB219_2 Depth=1
 ; RV32I-NEXT:    sltu a0, s1, a5
 ; RV32I-NEXT:    j .LBB219_5
 ; RV32I-NEXT:  .LBB219_4:
 ; RV32I-NEXT:    sltu a0, s2, a4
 ; RV32I-NEXT:  .LBB219_5: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB219_2 Depth=1
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    mv a2, a4
 ; RV32I-NEXT:    mv a3, a5
 ; RV32I-NEXT:    bnez a0, .LBB219_1
 ; RV32I-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32I-NEXT:    # in Loop: Header=BB219_2 Depth=1
 ; RV32I-NEXT:    mv a2, s2
 ; RV32I-NEXT:    mv a3, s1
 ; RV32I-NEXT:    j .LBB219_1
@@ -17826,9 +17982,9 @@ define i64 @atomicrmw_umin_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    mv s1, a2
 ; RV32IA-NEXT:    mv s2, a1
 ; RV32IA-NEXT:    mv s3, sp
-; RV32IA-NEXT:    bne a5, s1, .LBB219_3
-; RV32IA-NEXT:    j .LBB219_4
+; RV32IA-NEXT:    j .LBB219_2
 ; RV32IA-NEXT:  .LBB219_1: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB219_2 Depth=1
 ; RV32IA-NEXT:    sw a4, 0(sp)
 ; RV32IA-NEXT:    sw a5, 4(sp)
 ; RV32IA-NEXT:    addi a4, zero, 5
@@ -17839,19 +17995,23 @@ define i64 @atomicrmw_umin_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    lw a5, 4(sp)
 ; RV32IA-NEXT:    lw a4, 0(sp)
 ; RV32IA-NEXT:    bnez a0, .LBB219_7
-; RV32IA-NEXT:  # %bb.2: # %atomicrmw.start
+; RV32IA-NEXT:  .LBB219_2: # %atomicrmw.start
+; RV32IA-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    beq a5, s1, .LBB219_4
-; RV32IA-NEXT:  .LBB219_3: # %atomicrmw.start
+; RV32IA-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB219_2 Depth=1
 ; RV32IA-NEXT:    sltu a0, s1, a5
 ; RV32IA-NEXT:    j .LBB219_5
 ; RV32IA-NEXT:  .LBB219_4:
 ; RV32IA-NEXT:    sltu a0, s2, a4
 ; RV32IA-NEXT:  .LBB219_5: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB219_2 Depth=1
 ; RV32IA-NEXT:    xori a0, a0, 1
 ; RV32IA-NEXT:    mv a2, a4
 ; RV32IA-NEXT:    mv a3, a5
 ; RV32IA-NEXT:    bnez a0, .LBB219_1
 ; RV32IA-NEXT:  # %bb.6: # %atomicrmw.start
+; RV32IA-NEXT:    # in Loop: Header=BB219_2 Depth=1
 ; RV32IA-NEXT:    mv a2, s2
 ; RV32IA-NEXT:    mv a3, s1
 ; RV32IA-NEXT:    j .LBB219_1
@@ -17877,10 +18037,9 @@ define i64 @atomicrmw_umin_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    ld a3, 0(a0)
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    addi s2, sp, 8
-; RV64I-NEXT:    mv a2, a3
-; RV64I-NEXT:    bltu s1, a3, .LBB219_3
+; RV64I-NEXT:    j .LBB219_2
 ; RV64I-NEXT:  .LBB219_1: # %atomicrmw.start
-; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    # in Loop: Header=BB219_2 Depth=1
 ; RV64I-NEXT:    sd a3, 8(sp)
 ; RV64I-NEXT:    addi a3, zero, 5
 ; RV64I-NEXT:    addi a4, zero, 5
@@ -17889,11 +18048,12 @@ define i64 @atomicrmw_umin_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    call __atomic_compare_exchange_8
 ; RV64I-NEXT:    ld a3, 8(sp)
 ; RV64I-NEXT:    bnez a0, .LBB219_4
-; RV64I-NEXT:  # %bb.2: # %atomicrmw.start
-; RV64I-NEXT:    # in Loop: Header=BB219_1 Depth=1
+; RV64I-NEXT:  .LBB219_2: # %atomicrmw.start
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT:    mv a2, a3
 ; RV64I-NEXT:    bgeu s1, a3, .LBB219_1
-; RV64I-NEXT:  .LBB219_3: # %atomicrmw.start
+; RV64I-NEXT:  # %bb.3: # %atomicrmw.start
+; RV64I-NEXT:    # in Loop: Header=BB219_2 Depth=1
 ; RV64I-NEXT:    mv a2, s1
 ; RV64I-NEXT:    j .LBB219_1
 ; RV64I-NEXT:  .LBB219_4: # %atomicrmw.end

diff  --git a/llvm/test/CodeGen/RISCV/remat.ll b/llvm/test/CodeGen/RISCV/remat.ll
index d8267e7a7ee4..e440515d6322 100644
--- a/llvm/test/CodeGen/RISCV/remat.ll
+++ b/llvm/test/CodeGen/RISCV/remat.ll
@@ -52,20 +52,19 @@ define i32 @test() nounwind {
 ; RV32I-NEXT:    lui s9, %hi(h)
 ; RV32I-NEXT:    lui s10, %hi(c)
 ; RV32I-NEXT:    lui s11, %hi(b)
-; RV32I-NEXT:    lw a1, %lo(l)(s2)
-; RV32I-NEXT:    bnez a1, .LBB0_4
-; RV32I-NEXT:    j .LBB0_5
+; RV32I-NEXT:    j .LBB0_3
 ; RV32I-NEXT:  .LBB0_2: # %for.inc
-; RV32I-NEXT:    # in Loop: Header=BB0_5 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB0_3 Depth=1
 ; RV32I-NEXT:    lw a0, %lo(a)(s6)
 ; RV32I-NEXT:    addi a0, a0, -1
 ; RV32I-NEXT:    sw a0, %lo(a)(s6)
 ; RV32I-NEXT:    beqz a0, .LBB0_11
-; RV32I-NEXT:  # %bb.3: # %for.body
-; RV32I-NEXT:    # in Loop: Header=BB0_5 Depth=1
+; RV32I-NEXT:  .LBB0_3: # %for.body
+; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32I-NEXT:    lw a1, %lo(l)(s2)
 ; RV32I-NEXT:    beqz a1, .LBB0_5
-; RV32I-NEXT:  .LBB0_4: # %if.then
+; RV32I-NEXT:  # %bb.4: # %if.then
+; RV32I-NEXT:    # in Loop: Header=BB0_3 Depth=1
 ; RV32I-NEXT:    lw a1, %lo(b)(s11)
 ; RV32I-NEXT:    lw a2, %lo(c)(s10)
 ; RV32I-NEXT:    lw a3, %lo(d)(s1)
@@ -73,11 +72,11 @@ define i32 @test() nounwind {
 ; RV32I-NEXT:    addi a5, zero, 32
 ; RV32I-NEXT:    call foo
 ; RV32I-NEXT:  .LBB0_5: # %if.end
-; RV32I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB0_3 Depth=1
 ; RV32I-NEXT:    lw a0, %lo(k)(s3)
 ; RV32I-NEXT:    beqz a0, .LBB0_7
 ; RV32I-NEXT:  # %bb.6: # %if.then3
-; RV32I-NEXT:    # in Loop: Header=BB0_5 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB0_3 Depth=1
 ; RV32I-NEXT:    lw a0, %lo(b)(s11)
 ; RV32I-NEXT:    lw a1, %lo(c)(s10)
 ; RV32I-NEXT:    lw a2, %lo(d)(s1)
@@ -86,11 +85,11 @@ define i32 @test() nounwind {
 ; RV32I-NEXT:    addi a5, zero, 64
 ; RV32I-NEXT:    call foo
 ; RV32I-NEXT:  .LBB0_7: # %if.end5
-; RV32I-NEXT:    # in Loop: Header=BB0_5 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB0_3 Depth=1
 ; RV32I-NEXT:    lw a0, %lo(j)(s4)
 ; RV32I-NEXT:    beqz a0, .LBB0_9
 ; RV32I-NEXT:  # %bb.8: # %if.then7
-; RV32I-NEXT:    # in Loop: Header=BB0_5 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB0_3 Depth=1
 ; RV32I-NEXT:    lw a0, %lo(c)(s10)
 ; RV32I-NEXT:    lw a1, %lo(d)(s1)
 ; RV32I-NEXT:    lw a2, %lo(e)(s0)
@@ -99,11 +98,11 @@ define i32 @test() nounwind {
 ; RV32I-NEXT:    addi a5, zero, 32
 ; RV32I-NEXT:    call foo
 ; RV32I-NEXT:  .LBB0_9: # %if.end9
-; RV32I-NEXT:    # in Loop: Header=BB0_5 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB0_3 Depth=1
 ; RV32I-NEXT:    lw a0, %lo(i)(s5)
 ; RV32I-NEXT:    beqz a0, .LBB0_2
 ; RV32I-NEXT:  # %bb.10: # %if.then11
-; RV32I-NEXT:    # in Loop: Header=BB0_5 Depth=1
+; RV32I-NEXT:    # in Loop: Header=BB0_3 Depth=1
 ; RV32I-NEXT:    lw a0, %lo(d)(s1)
 ; RV32I-NEXT:    lw a1, %lo(e)(s0)
 ; RV32I-NEXT:    lw a2, %lo(f)(s7)

diff  --git a/llvm/test/CodeGen/Thumb2/2010-02-11-phi-cycle.ll b/llvm/test/CodeGen/Thumb2/2010-02-11-phi-cycle.ll
index fff83c546678..bf3a0f803b05 100644
--- a/llvm/test/CodeGen/Thumb2/2010-02-11-phi-cycle.ll
+++ b/llvm/test/CodeGen/Thumb2/2010-02-11-phi-cycle.ll
@@ -31,9 +31,14 @@ return:                                           ; preds = %bb, %entry
 
 define i32 @test_dead_cycle(i32 %n) nounwind {
 ; CHECK-LABEL: test_dead_cycle:
+; CHECK: subs
+; also check for duplicate induction variables (radar 7645034)
+; CHECK: subs r{{.*}}, #1
+; CHECK-NOT: subs r{{.*}}, #1
 ; CHECK: bl
 ; CHECK-NOT: mov
 ; CHECK: bl
+; CHECK: pop
 entry:
   %0 = icmp eq i32 %n, 1                          ; <i1> [#uses=1]
   br i1 %0, label %return, label %bb.nph
@@ -58,10 +63,6 @@ bb1:                                              ; preds = %bb
   br label %bb2
 
 bb2:                                              ; preds = %bb1, %bb
-; also check for duplicate induction variables (radar 7645034)
-; CHECK: subs r{{.*}}, #1
-; CHECK-NOT: subs r{{.*}}, #1
-; CHECK: pop
   %u.0 = phi i64 [ %ins, %bb1 ], [ %u.17, %bb ]   ; <i64> [#uses=2]
   %indvar.next = add i32 %indvar, 1               ; <i32> [#uses=2]
   %exitcond = icmp eq i32 %indvar.next, %tmp      ; <i1> [#uses=1]

diff  --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mve-float-loops.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mve-float-loops.ll
index 841b6631eba1..44548484ec1a 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mve-float-loops.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mve-float-loops.ll
@@ -1532,7 +1532,7 @@ define arm_aapcs_vfpcc float @half_half_mac(half* nocapture readonly %a, half* n
 ; CHECK-NEXT:    b .LBB9_6
 ; CHECK-NEXT:  .LBB9_3:
 ; CHECK-NEXT:    vldr s0, .LCPI9_0
-; CHECK-NEXT:    pop {r4, r5, r7, pc}
+; CHECK-NEXT:    b .LBB9_9
 ; CHECK-NEXT:  .LBB9_4: @ %for.body.preheader.new
 ; CHECK-NEXT:    subs r2, r2, r4
 ; CHECK-NEXT:    movs r3, #1
@@ -1693,7 +1693,7 @@ define arm_aapcs_vfpcc float @half_half_acc(half* nocapture readonly %a, half* n
 ; CHECK-NEXT:    b .LBB10_6
 ; CHECK-NEXT:  .LBB10_3:
 ; CHECK-NEXT:    vldr s0, .LCPI10_0
-; CHECK-NEXT:    pop {r4, r5, r7, pc}
+; CHECK-NEXT:    b .LBB10_9
 ; CHECK-NEXT:  .LBB10_4: @ %for.body.preheader.new
 ; CHECK-NEXT:    subs r2, r2, r4
 ; CHECK-NEXT:    movs r3, #1
@@ -1854,7 +1854,7 @@ define arm_aapcs_vfpcc float @half_short_mac(half* nocapture readonly %a, i16* n
 ; CHECK-NEXT:    b .LBB11_6
 ; CHECK-NEXT:  .LBB11_3:
 ; CHECK-NEXT:    vldr s0, .LCPI11_0
-; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
+; CHECK-NEXT:    b .LBB11_9
 ; CHECK-NEXT:  .LBB11_4: @ %for.body.preheader.new
 ; CHECK-NEXT:    subs r2, r2, r7
 ; CHECK-NEXT:    movs r3, #1

diff  --git a/llvm/test/CodeGen/Thumb2/cbnz.ll b/llvm/test/CodeGen/Thumb2/cbnz.ll
index e11c4038678c..51b15a2ddf61 100644
--- a/llvm/test/CodeGen/Thumb2/cbnz.ll
+++ b/llvm/test/CodeGen/Thumb2/cbnz.ll
@@ -5,7 +5,7 @@ declare void @y()
 
 define void @f(i32 %x, i32 %y) {
   ; CHECK-LABEL: f:
-  ; CHECK: cbnz
+  ; CHECK: cbz
   %p = icmp eq i32 %x, 0
   br i1 %p, label %t, label %f
 
@@ -26,7 +26,7 @@ t:
   call void @x()
   call void @x()
   call void @x()
-  ; CHECK: cbz
+  ; CHECK: bne
   %q = icmp eq i32 %y, 0
   br i1 %q, label %t2, label %f
 

diff  --git a/llvm/test/CodeGen/X86/mmx-coalescing.ll b/llvm/test/CodeGen/X86/mmx-coalescing.ll
index 8cd57aa8c534..cd343c4c66a5 100644
--- a/llvm/test/CodeGen/X86/mmx-coalescing.ll
+++ b/llvm/test/CodeGen/X86/mmx-coalescing.ll
@@ -16,17 +16,14 @@ define i32 @test(%SA* %pSA, i16* %A, i32 %B, i32 %C, i32 %D, i8* %E) {
 ; CHECK-NEXT:  # %bb.2: # %if.B
 ; CHECK-NEXT:    pshufw $238, %mm0, %mm0 # mm0 = mm0[2,3,2,3]
 ; CHECK-NEXT:    movq %mm0, %rax
-; CHECK-NEXT:    testl %eax, %eax
-; CHECK-NEXT:    jne .LBB0_4
+; CHECK-NEXT:    jmp .LBB0_3
 ; CHECK-NEXT:  .LBB0_1: # %if.A
-; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movd %edx, %mm1
 ; CHECK-NEXT:    psllq %mm1, %mm0
 ; CHECK-NEXT:    movq %mm0, %rax
 ; CHECK-NEXT:    testq %rax, %rax
 ; CHECK-NEXT:    jne .LBB0_4
-; CHECK-NEXT:  # %bb.3: # %if.C
-; CHECK-NEXT:    # in Loop: Header=BB0_1 Depth=1
+; CHECK-NEXT:  .LBB0_3: # %if.C
 ; CHECK-NEXT:    testl %eax, %eax
 ; CHECK-NEXT:    je .LBB0_1
 ; CHECK-NEXT:  .LBB0_4: # %merge

diff  --git a/llvm/test/CodeGen/X86/pr38795.ll b/llvm/test/CodeGen/X86/pr38795.ll
index 11534a920c27..d805dcad8b6e 100644
--- a/llvm/test/CodeGen/X86/pr38795.ll
+++ b/llvm/test/CodeGen/X86/pr38795.ll
@@ -70,8 +70,15 @@ define dso_local void @fn() {
 ; CHECK-NEXT:    movb %dl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
 ; CHECK-NEXT:    movb %dh, %dl
 ; CHECK-NEXT:    jne .LBB0_16
+; CHECK-NEXT:    jmp .LBB0_6
 ; CHECK-NEXT:    .p2align 4, 0x90
-; CHECK-NEXT:  # %bb.6: # %for.cond35
+; CHECK-NEXT:  .LBB0_3: # %if.then
+; CHECK-NEXT:    # in Loop: Header=BB0_1 Depth=1
+; CHECK-NEXT:    movl $.str, (%esp)
+; CHECK-NEXT:    calll printf
+; CHECK-NEXT:    movb {{[-0-9]+}}(%e{{[sb]}}p), %dl # 1-byte Reload
+; CHECK-NEXT:    # implicit-def: $eax
+; CHECK-NEXT:  .LBB0_6: # %for.cond35
 ; CHECK-NEXT:    # in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    je .LBB0_7
@@ -96,22 +103,10 @@ define dso_local void @fn() {
 ; CHECK-NEXT:    # implicit-def: $ebp
 ; CHECK-NEXT:    jmp .LBB0_20
 ; CHECK-NEXT:    .p2align 4, 0x90
-; CHECK-NEXT:  .LBB0_3: # %if.then
-; CHECK-NEXT:    # in Loop: Header=BB0_1 Depth=1
-; CHECK-NEXT:    movl $.str, (%esp)
-; CHECK-NEXT:    calll printf
-; CHECK-NEXT:    movb {{[-0-9]+}}(%e{{[sb]}}p), %dl # 1-byte Reload
-; CHECK-NEXT:    # implicit-def: $eax
-; CHECK-NEXT:    testl %edi, %edi
-; CHECK-NEXT:    jne .LBB0_11
-; CHECK-NEXT:    jmp .LBB0_7
-; CHECK-NEXT:    .p2align 4, 0x90
 ; CHECK-NEXT:  .LBB0_8: # %if.end21
 ; CHECK-NEXT:    # in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    # implicit-def: $ebp
-; CHECK-NEXT:    testb %bl, %bl
-; CHECK-NEXT:    je .LBB0_13
-; CHECK-NEXT:    jmp .LBB0_10
+; CHECK-NEXT:    jmp .LBB0_9
 ; CHECK-NEXT:    .p2align 4, 0x90
 ; CHECK-NEXT:  .LBB0_7: # in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    xorl %edi, %edi
@@ -127,11 +122,11 @@ define dso_local void @fn() {
 ; CHECK-NEXT:    # in Loop: Header=BB0_20 Depth=2
 ; CHECK-NEXT:    testb %bl, %bl
 ; CHECK-NEXT:    jne .LBB0_20
-; CHECK-NEXT:  # %bb.9: # %ae
+; CHECK-NEXT:  .LBB0_9: # %ae
 ; CHECK-NEXT:    # in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    testb %bl, %bl
 ; CHECK-NEXT:    jne .LBB0_10
-; CHECK-NEXT:  .LBB0_13: # %if.end26
+; CHECK-NEXT:  # %bb.13: # %if.end26
 ; CHECK-NEXT:    # in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    xorl %ecx, %ecx
 ; CHECK-NEXT:    testb %dl, %dl

diff  --git a/llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll b/llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll
index 92708d33924f..670477c4c285 100644
--- a/llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll
+++ b/llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll
@@ -114,7 +114,7 @@ define i8* @SyFgets(i8* %line, i64 %length, i64 %fid) {
 ; CHECK-NEXT:    jle LBB0_22
 ; CHECK-NEXT:  LBB0_13: ## %while.body200
 ; CHECK-NEXT:    ## =>This Loop Header: Depth=1
-; CHECK-NEXT:    ## Child Loop BB0_30 Depth 2
+; CHECK-NEXT:    ## Child Loop BB0_29 Depth 2
 ; CHECK-NEXT:    ## Child Loop BB0_38 Depth 2
 ; CHECK-NEXT:    leal -268(%rbp), %eax
 ; CHECK-NEXT:    cmpl $105, %eax
@@ -160,27 +160,25 @@ define i8* @SyFgets(i8* %line, i64 %length, i64 %fid) {
 ; CHECK-NEXT:  ## %bb.28: ## %land.rhs485.preheader
 ; CHECK-NEXT:    ## in Loop: Header=BB0_13 Depth=1
 ; CHECK-NEXT:    ## implicit-def: $rax
-; CHECK-NEXT:    testb %al, %al
-; CHECK-NEXT:    jns LBB0_30
-; CHECK-NEXT:    jmp LBB0_55
+; CHECK-NEXT:    jmp LBB0_29
 ; CHECK-NEXT:    .p2align 4, 0x90
 ; CHECK-NEXT:  LBB0_32: ## %do.body479.backedge
-; CHECK-NEXT:    ## in Loop: Header=BB0_30 Depth=2
+; CHECK-NEXT:    ## in Loop: Header=BB0_29 Depth=2
 ; CHECK-NEXT:    leaq 1(%r14), %rax
 ; CHECK-NEXT:    testb %bl, %bl
 ; CHECK-NEXT:    je LBB0_33
-; CHECK-NEXT:  ## %bb.29: ## %land.rhs485
-; CHECK-NEXT:    ## in Loop: Header=BB0_30 Depth=2
-; CHECK-NEXT:    testb %al, %al
-; CHECK-NEXT:    js LBB0_55
-; CHECK-NEXT:  LBB0_30: ## %cond.true.i.i2780
+; CHECK-NEXT:  LBB0_29: ## %land.rhs485
 ; CHECK-NEXT:    ## Parent Loop BB0_13 Depth=1
 ; CHECK-NEXT:    ## => This Inner Loop Header: Depth=2
+; CHECK-NEXT:    testb %al, %al
+; CHECK-NEXT:    js LBB0_55
+; CHECK-NEXT:  ## %bb.30: ## %cond.true.i.i2780
+; CHECK-NEXT:    ## in Loop: Header=BB0_29 Depth=2
 ; CHECK-NEXT:    movq %rax, %r14
 ; CHECK-NEXT:    testb %bl, %bl
 ; CHECK-NEXT:    jne LBB0_32
 ; CHECK-NEXT:  ## %bb.31: ## %lor.rhs500
-; CHECK-NEXT:    ## in Loop: Header=BB0_30 Depth=2
+; CHECK-NEXT:    ## in Loop: Header=BB0_29 Depth=2
 ; CHECK-NEXT:    movl $256, %esi ## imm = 0x100
 ; CHECK-NEXT:    callq ___maskrune
 ; CHECK-NEXT:    testb %bl, %bl

diff  --git a/llvm/test/CodeGen/X86/reverse_branches.ll b/llvm/test/CodeGen/X86/reverse_branches.ll
index fabde1679494..170fc6a76280 100644
--- a/llvm/test/CodeGen/X86/reverse_branches.ll
+++ b/llvm/test/CodeGen/X86/reverse_branches.ll
@@ -36,24 +36,24 @@ define i32 @test_branches_order() uwtable ssp {
 ; CHECK-NEXT:    xorl %r12d, %r12d
 ; CHECK-NEXT:    leaq -{{[0-9]+}}(%rsp), %r14
 ; CHECK-NEXT:    movq %rsp, %r15
-; CHECK-NEXT:    cmpl $999, %r12d ## imm = 0x3E7
-; CHECK-NEXT:    jle LBB0_2
-; CHECK-NEXT:    jmp LBB0_7
+; CHECK-NEXT:    jmp LBB0_1
 ; CHECK-NEXT:    .p2align 4, 0x90
 ; CHECK-NEXT:  LBB0_6: ## %for.inc9
-; CHECK-NEXT:    ## in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT:    ## in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    incl %r12d
-; CHECK-NEXT:    cmpl $999, %r12d ## imm = 0x3E7
-; CHECK-NEXT:    jg LBB0_7
-; CHECK-NEXT:  LBB0_2: ## %for.cond1.preheader
+; CHECK-NEXT:  LBB0_1: ## %for.cond
 ; CHECK-NEXT:    ## =>This Loop Header: Depth=1
 ; CHECK-NEXT:    ## Child Loop BB0_3 Depth 2
+; CHECK-NEXT:    cmpl $999, %r12d ## imm = 0x3E7
+; CHECK-NEXT:    jg LBB0_7
+; CHECK-NEXT:  ## %bb.2: ## %for.cond1.preheader
+; CHECK-NEXT:    ## in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    movl $-1, %r13d
 ; CHECK-NEXT:    movq %r15, %rbx
 ; CHECK-NEXT:    movq %r14, %rbp
 ; CHECK-NEXT:    .p2align 4, 0x90
 ; CHECK-NEXT:  LBB0_3: ## %for.cond1
-; CHECK-NEXT:    ## Parent Loop BB0_2 Depth=1
+; CHECK-NEXT:    ## Parent Loop BB0_1 Depth=1
 ; CHECK-NEXT:    ## => This Inner Loop Header: Depth=2
 ; CHECK-NEXT:    incl %r13d
 ; CHECK-NEXT:    cmpl $999, %r13d ## imm = 0x3E7
@@ -74,47 +74,45 @@ define i32 @test_branches_order() uwtable ssp {
 ; CHECK-NEXT:    callq _puts
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    movq %rsp, %rcx
-; CHECK-NEXT:    cmpl $999, %eax ## imm = 0x3E7
-; CHECK-NEXT:    jle LBB0_9
-; CHECK-NEXT:    jmp LBB0_16
+; CHECK-NEXT:    jmp LBB0_8
 ; CHECK-NEXT:    .p2align 4, 0x90
 ; CHECK-NEXT:  LBB0_15: ## %for.inc38
-; CHECK-NEXT:    ## in Loop: Header=BB0_9 Depth=1
+; CHECK-NEXT:    ## in Loop: Header=BB0_8 Depth=1
 ; CHECK-NEXT:    incl %eax
-; CHECK-NEXT:    cmpl $999, %eax ## imm = 0x3E7
-; CHECK-NEXT:    jg LBB0_16
-; CHECK-NEXT:  LBB0_9: ## %for.cond18.preheader
+; CHECK-NEXT:  LBB0_8: ## %for.cond14
 ; CHECK-NEXT:    ## =>This Loop Header: Depth=1
-; CHECK-NEXT:    ## Child Loop BB0_11 Depth 2
+; CHECK-NEXT:    ## Child Loop BB0_10 Depth 2
 ; CHECK-NEXT:    ## Child Loop BB0_12 Depth 3
+; CHECK-NEXT:    cmpl $999, %eax ## imm = 0x3E7
+; CHECK-NEXT:    jg LBB0_16
+; CHECK-NEXT:  ## %bb.9: ## %for.cond18.preheader
+; CHECK-NEXT:    ## in Loop: Header=BB0_8 Depth=1
 ; CHECK-NEXT:    movq %rcx, %rdx
 ; CHECK-NEXT:    xorl %esi, %esi
 ; CHECK-NEXT:    xorl %edi, %edi
-; CHECK-NEXT:    cmpl $999, %edi ## imm = 0x3E7
-; CHECK-NEXT:    jle LBB0_11
-; CHECK-NEXT:    jmp LBB0_15
+; CHECK-NEXT:    jmp LBB0_10
 ; CHECK-NEXT:    .p2align 4, 0x90
 ; CHECK-NEXT:  LBB0_14: ## %exit
-; CHECK-NEXT:    ## in Loop: Header=BB0_11 Depth=2
+; CHECK-NEXT:    ## in Loop: Header=BB0_10 Depth=2
 ; CHECK-NEXT:    addq %rsi, %rbp
 ; CHECK-NEXT:    incq %rdi
 ; CHECK-NEXT:    decq %rsi
 ; CHECK-NEXT:    addq $1001, %rdx ## imm = 0x3E9
 ; CHECK-NEXT:    cmpq $-1000, %rbp ## imm = 0xFC18
 ; CHECK-NEXT:    jne LBB0_5
-; CHECK-NEXT:  ## %bb.10: ## %for.cond18
-; CHECK-NEXT:    ## in Loop: Header=BB0_11 Depth=2
-; CHECK-NEXT:    cmpl $999, %edi ## imm = 0x3E7
-; CHECK-NEXT:    jg LBB0_15
-; CHECK-NEXT:  LBB0_11: ## %for.body20
-; CHECK-NEXT:    ## Parent Loop BB0_9 Depth=1
+; CHECK-NEXT:  LBB0_10: ## %for.cond18
+; CHECK-NEXT:    ## Parent Loop BB0_8 Depth=1
 ; CHECK-NEXT:    ## => This Loop Header: Depth=2
 ; CHECK-NEXT:    ## Child Loop BB0_12 Depth 3
+; CHECK-NEXT:    cmpl $999, %edi ## imm = 0x3E7
+; CHECK-NEXT:    jg LBB0_15
+; CHECK-NEXT:  ## %bb.11: ## %for.body20
+; CHECK-NEXT:    ## in Loop: Header=BB0_10 Depth=2
 ; CHECK-NEXT:    movq $-1000, %rbp ## imm = 0xFC18
 ; CHECK-NEXT:    .p2align 4, 0x90
 ; CHECK-NEXT:  LBB0_12: ## %do.body.i
-; CHECK-NEXT:    ## Parent Loop BB0_9 Depth=1
-; CHECK-NEXT:    ## Parent Loop BB0_11 Depth=2
+; CHECK-NEXT:    ## Parent Loop BB0_8 Depth=1
+; CHECK-NEXT:    ## Parent Loop BB0_10 Depth=2
 ; CHECK-NEXT:    ## => This Inner Loop Header: Depth=3
 ; CHECK-NEXT:    cmpb $120, 1000(%rdx,%rbp)
 ; CHECK-NEXT:    je LBB0_14

diff  --git a/llvm/test/CodeGen/X86/shadow-stack.ll b/llvm/test/CodeGen/X86/shadow-stack.ll
index 6fa413781820..bb5461062ded 100644
--- a/llvm/test/CodeGen/X86/shadow-stack.ll
+++ b/llvm/test/CodeGen/X86/shadow-stack.ll
@@ -141,15 +141,14 @@ define i32 @foo(i32 %i) local_unnamed_addr {
 ; X86_64-NEXT:    #EH_SjLj_Setup LBB1_4
 ; X86_64-NEXT:  ## %bb.1: ## %entry
 ; X86_64-NEXT:    xorl %eax, %eax
-; X86_64-NEXT:    testl %eax, %eax
-; X86_64-NEXT:    jne LBB1_3
-; X86_64-NEXT:    jmp LBB1_5
+; X86_64-NEXT:    jmp LBB1_2
 ; X86_64-NEXT:  LBB1_4: ## Block address taken
 ; X86_64-NEXT:    ## %entry
 ; X86_64-NEXT:    movl $1, %eax
+; X86_64-NEXT:  LBB1_2: ## %entry
 ; X86_64-NEXT:    testl %eax, %eax
 ; X86_64-NEXT:    je LBB1_5
-; X86_64-NEXT:  LBB1_3: ## %if.end
+; X86_64-NEXT:  ## %bb.3: ## %if.end
 ; X86_64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax ## 8-byte Reload
 ; X86_64-NEXT:    shll $2, %eax
 ; X86_64-NEXT:    leal (%rax,%rax,2), %eax
@@ -190,15 +189,14 @@ define i32 @foo(i32 %i) local_unnamed_addr {
 ; X86-NEXT:    #EH_SjLj_Setup LBB1_4
 ; X86-NEXT:  ## %bb.1: ## %entry
 ; X86-NEXT:    xorl %eax, %eax
-; X86-NEXT:    testl %eax, %eax
-; X86-NEXT:    jne LBB1_3
-; X86-NEXT:    jmp LBB1_5
+; X86-NEXT:    jmp LBB1_2
 ; X86-NEXT:  LBB1_4: ## Block address taken
 ; X86-NEXT:    ## %entry
 ; X86-NEXT:    movl $1, %eax
+; X86-NEXT:  LBB1_2: ## %entry
 ; X86-NEXT:    testl %eax, %eax
 ; X86-NEXT:    je LBB1_5
-; X86-NEXT:  LBB1_3: ## %if.end
+; X86-NEXT:  ## %bb.3: ## %if.end
 ; X86-NEXT:    movl 8(%ebp), %eax
 ; X86-NEXT:    shll $2, %eax
 ; X86-NEXT:    leal (%eax,%eax,2), %eax

diff  --git a/llvm/test/CodeGen/X86/speculative-load-hardening.ll b/llvm/test/CodeGen/X86/speculative-load-hardening.ll
index 934581e137f2..c20964fcdaca 100644
--- a/llvm/test/CodeGen/X86/speculative-load-hardening.ll
+++ b/llvm/test/CodeGen/X86/speculative-load-hardening.ll
@@ -411,8 +411,18 @@ define void @test_basic_nested_loop(i32 %a, i32 %b, i32 %c, i32* %ptr1, i32* %pt
 ; X64-LFENCE-NEXT:    pushq %rbx
 ; X64-LFENCE-NEXT:    pushq %rax
 ; X64-LFENCE-NEXT:    testl %edi, %edi
-; X64-LFENCE-NEXT:    jne .LBB3_6
-; X64-LFENCE-NEXT:  # %bb.1: # %l1.header.preheader
+; X64-LFENCE-NEXT:    je .LBB3_1
+; X64-LFENCE-NEXT:  .LBB3_6: # %exit
+; X64-LFENCE-NEXT:    lfence
+; X64-LFENCE-NEXT:    addq $8, %rsp
+; X64-LFENCE-NEXT:    popq %rbx
+; X64-LFENCE-NEXT:    popq %r12
+; X64-LFENCE-NEXT:    popq %r13
+; X64-LFENCE-NEXT:    popq %r14
+; X64-LFENCE-NEXT:    popq %r15
+; X64-LFENCE-NEXT:    popq %rbp
+; X64-LFENCE-NEXT:    retq
+; X64-LFENCE-NEXT:  .LBB3_1: # %l1.header.preheader
 ; X64-LFENCE-NEXT:    movq %r8, %r14
 ; X64-LFENCE-NEXT:    movq %rcx, %rbx
 ; X64-LFENCE-NEXT:    movl %edx, %r13d
@@ -452,16 +462,6 @@ define void @test_basic_nested_loop(i32 %a, i32 %b, i32 %c, i32* %ptr1, i32* %pt
 ; X64-LFENCE-NEXT:    cmpl %r13d, %ebp
 ; X64-LFENCE-NEXT:    jl .LBB3_4
 ; X64-LFENCE-NEXT:    jmp .LBB3_5
-; X64-LFENCE-NEXT:  .LBB3_6: # %exit
-; X64-LFENCE-NEXT:    lfence
-; X64-LFENCE-NEXT:    addq $8, %rsp
-; X64-LFENCE-NEXT:    popq %rbx
-; X64-LFENCE-NEXT:    popq %r12
-; X64-LFENCE-NEXT:    popq %r13
-; X64-LFENCE-NEXT:    popq %r14
-; X64-LFENCE-NEXT:    popq %r15
-; X64-LFENCE-NEXT:    popq %rbp
-; X64-LFENCE-NEXT:    retq
 entry:
   %a.cmp = icmp eq i32 %a, 0
   br i1 %a.cmp, label %l1.header, label %exit

diff  --git a/llvm/test/CodeGen/X86/tail-dup-merge-loop-headers.ll b/llvm/test/CodeGen/X86/tail-dup-merge-loop-headers.ll
index c11ffaba12ac..ceca191082c5 100644
--- a/llvm/test/CodeGen/X86/tail-dup-merge-loop-headers.ll
+++ b/llvm/test/CodeGen/X86/tail-dup-merge-loop-headers.ll
@@ -5,22 +5,23 @@
 define void @tail_dup_merge_loops(i32 %a, i8* %b, i8* %c) local_unnamed_addr #0 {
 ; CHECK-LABEL: tail_dup_merge_loops:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    testl %edi, %edi
-; CHECK-NEXT:    jne .LBB0_2
-; CHECK-NEXT:    jmp .LBB0_5
+; CHECK-NEXT:    jmp .LBB0_1
+; CHECK-NEXT:    .p2align 4, 0x90
 ; CHECK-NEXT:  .LBB0_3: # %inner_loop_exit
-; CHECK-NEXT:    # in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT:    # in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    incq %rsi
+; CHECK-NEXT:  .LBB0_1: # %outer_loop_top
+; CHECK-NEXT:    # =>This Loop Header: Depth=1
+; CHECK-NEXT:    # Child Loop BB0_4 Depth 2
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    je .LBB0_5
 ; CHECK-NEXT:    .p2align 4, 0x90
-; CHECK-NEXT:  .LBB0_2: # %inner_loop_top
-; CHECK-NEXT:    # =>This Loop Header: Depth=1
-; CHECK-NEXT:    # Child Loop BB0_4 Depth 2
+; CHECK-NEXT:  # %bb.2: # %inner_loop_top
+; CHECK-NEXT:    # in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    cmpb $0, (%rsi)
 ; CHECK-NEXT:    js .LBB0_3
 ; CHECK-NEXT:  .LBB0_4: # %inner_loop_latch
-; CHECK-NEXT:    # Parent Loop BB0_2 Depth=1
+; CHECK-NEXT:    # Parent Loop BB0_1 Depth=1
 ; CHECK-NEXT:    # => This Inner Loop Header: Depth=2
 ; CHECK-NEXT:    addq $2, %rsi
 ; CHECK-NEXT:    cmpb $0, (%rsi)
@@ -97,7 +98,7 @@ define i32 @loop_shared_header(i8* %exe, i32 %exesz, i32 %headsize, i32 %min, i3
 ; CHECK-NEXT:    movl $1, %ebx
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    testb %al, %al
-; CHECK-NEXT:    jne .LBB1_26
+; CHECK-NEXT:    jne .LBB1_27
 ; CHECK-NEXT:  # %bb.1: # %if.end19
 ; CHECK-NEXT:    movl %esi, %r13d
 ; CHECK-NEXT:    movq %rdi, %r12
@@ -108,101 +109,106 @@ define i32 @loop_shared_header(i8* %exe, i32 %exesz, i32 %headsize, i32 %min, i3
 ; CHECK-NEXT:    movq %r15, %rdi
 ; CHECK-NEXT:    callq cli_calloc
 ; CHECK-NEXT:    testl %r13d, %r13d
-; CHECK-NEXT:    je .LBB1_25
+; CHECK-NEXT:    je .LBB1_26
 ; CHECK-NEXT:  # %bb.2: # %if.end19
 ; CHECK-NEXT:    testl %ebp, %ebp
-; CHECK-NEXT:    je .LBB1_25
+; CHECK-NEXT:    je .LBB1_26
 ; CHECK-NEXT:  # %bb.3: # %if.end19
 ; CHECK-NEXT:    movq %rax, %rbx
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    testb %al, %al
-; CHECK-NEXT:    jne .LBB1_25
+; CHECK-NEXT:    jne .LBB1_26
 ; CHECK-NEXT:  # %bb.4: # %if.end19
 ; CHECK-NEXT:    cmpq %r12, %rbx
-; CHECK-NEXT:    jb .LBB1_25
+; CHECK-NEXT:    jb .LBB1_26
 ; CHECK-NEXT:  # %bb.5: # %if.end50
 ; CHECK-NEXT:    movq %rbx, %rdi
 ; CHECK-NEXT:    movq %r15, %rdx
 ; CHECK-NEXT:    callq memcpy
 ; CHECK-NEXT:    cmpl $4, %r14d
-; CHECK-NEXT:    jb .LBB1_28
+; CHECK-NEXT:    jb .LBB1_29
 ; CHECK-NEXT:  # %bb.6: # %shared_preheader
 ; CHECK-NEXT:    movb $32, %dl
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    # implicit-def: $rcx
+; CHECK-NEXT:    jmp .LBB1_9
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB1_7: # %merge_predecessor_split
+; CHECK-NEXT:    # in Loop: Header=BB1_9 Depth=1
+; CHECK-NEXT:    movb $32, %dl
+; CHECK-NEXT:    xorl %esi, %esi
+; CHECK-NEXT:  .LBB1_8: # %outer_loop_latch
+; CHECK-NEXT:    # in Loop: Header=BB1_9 Depth=1
+; CHECK-NEXT:    movzwl %si, %esi
+; CHECK-NEXT:    decl %esi
+; CHECK-NEXT:    movzwl %si, %esi
+; CHECK-NEXT:    leaq 1(%rcx,%rsi), %rcx
+; CHECK-NEXT:  .LBB1_9: # %outer_loop_header
+; CHECK-NEXT:    # =>This Loop Header: Depth=1
+; CHECK-NEXT:    # Child Loop BB1_10 Depth 2
 ; CHECK-NEXT:    testl %ebp, %ebp
-; CHECK-NEXT:    je .LBB1_18
+; CHECK-NEXT:    je .LBB1_19
 ; CHECK-NEXT:    .p2align 4, 0x90
-; CHECK-NEXT:  .LBB1_9: # %shared_loop_header
-; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:  .LBB1_10: # %shared_loop_header
+; CHECK-NEXT:    # Parent Loop BB1_9 Depth=1
+; CHECK-NEXT:    # => This Inner Loop Header: Depth=2
 ; CHECK-NEXT:    testq %rbx, %rbx
-; CHECK-NEXT:    jne .LBB1_27
-; CHECK-NEXT:  # %bb.10: # %inner_loop_body
-; CHECK-NEXT:    # in Loop: Header=BB1_9 Depth=1
+; CHECK-NEXT:    jne .LBB1_28
+; CHECK-NEXT:  # %bb.11: # %inner_loop_body
+; CHECK-NEXT:    # in Loop: Header=BB1_10 Depth=2
 ; CHECK-NEXT:    testl %eax, %eax
-; CHECK-NEXT:    jns .LBB1_9
-; CHECK-NEXT:  # %bb.11: # %if.end96.i
+; CHECK-NEXT:    jns .LBB1_10
+; CHECK-NEXT:  # %bb.12: # %if.end96.i
 ; CHECK-NEXT:    # in Loop: Header=BB1_9 Depth=1
 ; CHECK-NEXT:    cmpl $3, %ebp
-; CHECK-NEXT:    jae .LBB1_22
-; CHECK-NEXT:  # %bb.12: # %if.end287.i
+; CHECK-NEXT:    jae .LBB1_23
+; CHECK-NEXT:  # %bb.13: # %if.end287.i
 ; CHECK-NEXT:    # in Loop: Header=BB1_9 Depth=1
 ; CHECK-NEXT:    xorl %esi, %esi
 ; CHECK-NEXT:    cmpl $1, %ebp
 ; CHECK-NEXT:    setne %dl
 ; CHECK-NEXT:    testb %al, %al
-; CHECK-NEXT:    jne .LBB1_16
-; CHECK-NEXT:  # %bb.13: # %if.end308.i
+; CHECK-NEXT:    jne .LBB1_17
+; CHECK-NEXT:  # %bb.14: # %if.end308.i
 ; CHECK-NEXT:    # in Loop: Header=BB1_9 Depth=1
 ; CHECK-NEXT:    testb %al, %al
 ; CHECK-NEXT:    je .LBB1_7
-; CHECK-NEXT:  # %bb.14: # %if.end335.i
+; CHECK-NEXT:  # %bb.15: # %if.end335.i
 ; CHECK-NEXT:    # in Loop: Header=BB1_9 Depth=1
 ; CHECK-NEXT:    xorl %edx, %edx
 ; CHECK-NEXT:    testb %dl, %dl
 ; CHECK-NEXT:    movl $0, %esi
 ; CHECK-NEXT:    jne .LBB1_8
-; CHECK-NEXT:  # %bb.15: # %merge_other
+; CHECK-NEXT:  # %bb.16: # %merge_other
 ; CHECK-NEXT:    # in Loop: Header=BB1_9 Depth=1
 ; CHECK-NEXT:    xorl %esi, %esi
-; CHECK-NEXT:    jmp .LBB1_17
-; CHECK-NEXT:  .LBB1_16: # in Loop: Header=BB1_9 Depth=1
+; CHECK-NEXT:    jmp .LBB1_18
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB1_17: # in Loop: Header=BB1_9 Depth=1
 ; CHECK-NEXT:    movb %dl, %sil
 ; CHECK-NEXT:    addl $3, %esi
-; CHECK-NEXT:  .LBB1_17: # %outer_loop_latch
+; CHECK-NEXT:  .LBB1_18: # %outer_loop_latch
 ; CHECK-NEXT:    # in Loop: Header=BB1_9 Depth=1
 ; CHECK-NEXT:    # implicit-def: $dl
 ; CHECK-NEXT:    jmp .LBB1_8
-; CHECK-NEXT:  .LBB1_7: # %merge_predecessor_split
-; CHECK-NEXT:    # in Loop: Header=BB1_9 Depth=1
-; CHECK-NEXT:    movb $32, %dl
-; CHECK-NEXT:    xorl %esi, %esi
-; CHECK-NEXT:  .LBB1_8: # %outer_loop_latch
-; CHECK-NEXT:    # in Loop: Header=BB1_9 Depth=1
-; CHECK-NEXT:    movzwl %si, %esi
-; CHECK-NEXT:    decl %esi
-; CHECK-NEXT:    movzwl %si, %esi
-; CHECK-NEXT:    leaq 1(%rcx,%rsi), %rcx
-; CHECK-NEXT:    testl %ebp, %ebp
-; CHECK-NEXT:    jne .LBB1_9
-; CHECK-NEXT:  .LBB1_18: # %while.cond.us1412.i
+; CHECK-NEXT:  .LBB1_26:
+; CHECK-NEXT:    movl $1, %ebx
+; CHECK-NEXT:    jmp .LBB1_27
+; CHECK-NEXT:  .LBB1_19: # %while.cond.us1412.i
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    testb %al, %al
 ; CHECK-NEXT:    movl $1, %ebx
-; CHECK-NEXT:    jne .LBB1_20
-; CHECK-NEXT:  # %bb.19: # %while.cond.us1412.i
+; CHECK-NEXT:    jne .LBB1_21
+; CHECK-NEXT:  # %bb.20: # %while.cond.us1412.i
 ; CHECK-NEXT:    decb %dl
-; CHECK-NEXT:    jne .LBB1_26
-; CHECK-NEXT:  .LBB1_20: # %if.end41.us1436.i
-; CHECK-NEXT:  .LBB1_25:
-; CHECK-NEXT:    movl $1, %ebx
-; CHECK-NEXT:    jmp .LBB1_26
-; CHECK-NEXT:  .LBB1_22: # %if.then99.i
+; CHECK-NEXT:    jne .LBB1_27
+; CHECK-NEXT:  .LBB1_21: # %if.end41.us1436.i
+; CHECK-NEXT:  .LBB1_23: # %if.then99.i
 ; CHECK-NEXT:    xorl %ebx, %ebx
 ; CHECK-NEXT:    movl $.str.6, %edi
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    callq cli_dbgmsg
-; CHECK-NEXT:  .LBB1_26: # %cleanup
+; CHECK-NEXT:  .LBB1_27: # %cleanup
 ; CHECK-NEXT:    movl %ebx, %eax
 ; CHECK-NEXT:    addq $8, %rsp
 ; CHECK-NEXT:    popq %rbx
@@ -212,8 +218,8 @@ define i32 @loop_shared_header(i8* %exe, i32 %exesz, i32 %headsize, i32 %min, i3
 ; CHECK-NEXT:    popq %r15
 ; CHECK-NEXT:    popq %rbp
 ; CHECK-NEXT:    retq
-; CHECK-NEXT:  .LBB1_27: # %wunpsect.exit.thread.loopexit389
-; CHECK-NEXT:  .LBB1_28: # %wunpsect.exit.thread.loopexit391
+; CHECK-NEXT:  .LBB1_28: # %wunpsect.exit.thread.loopexit389
+; CHECK-NEXT:  .LBB1_29: # %wunpsect.exit.thread.loopexit391
 entry:
   %0 = load i32, i32* undef, align 4
   %mul = shl nsw i32 %0, 2

diff  --git a/llvm/test/CodeGen/X86/tail-dup-repeat.ll b/llvm/test/CodeGen/X86/tail-dup-repeat.ll
index bfa1ee611450..9a06c9a723bb 100644
--- a/llvm/test/CodeGen/X86/tail-dup-repeat.ll
+++ b/llvm/test/CodeGen/X86/tail-dup-repeat.ll
@@ -10,28 +10,33 @@
 define void @repeated_tail_dup(i1 %a1, i1 %a2, i32* %a4, i32* %a5, i8* %a6, i32 %a7) #0 align 2 {
 ; CHECK-LABEL: repeated_tail_dup:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    testb $1, %dil
-; CHECK-NEXT:    je .LBB0_3
+; CHECK-NEXT:    jmp .LBB0_1
 ; CHECK-NEXT:    .p2align 4, 0x90
 ; CHECK-NEXT:  .LBB0_2: # %land.lhs.true
+; CHECK-NEXT:    # in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    movl $10, (%rdx)
 ; CHECK-NEXT:  .LBB0_6: # %dup2
+; CHECK-NEXT:    # in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    movl $2, (%rcx)
 ; CHECK-NEXT:    testl %r9d, %r9d
 ; CHECK-NEXT:    jne .LBB0_8
 ; CHECK-NEXT:  .LBB0_1: # %for.cond
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    testb $1, %dil
 ; CHECK-NEXT:    jne .LBB0_2
-; CHECK-NEXT:  .LBB0_3: # %if.end56
+; CHECK-NEXT:  # %bb.3: # %if.end56
+; CHECK-NEXT:    # in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    testb $1, %sil
 ; CHECK-NEXT:    je .LBB0_5
 ; CHECK-NEXT:  # %bb.4: # %if.then64
+; CHECK-NEXT:    # in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    movb $1, (%r8)
 ; CHECK-NEXT:    testl %r9d, %r9d
 ; CHECK-NEXT:    je .LBB0_1
 ; CHECK-NEXT:    jmp .LBB0_8
 ; CHECK-NEXT:    .p2align 4, 0x90
 ; CHECK-NEXT:  .LBB0_5: # %if.end70
+; CHECK-NEXT:    # in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    movl $12, (%rdx)
 ; CHECK-NEXT:    jmp .LBB0_6
 ; CHECK-NEXT:  .LBB0_8: # %for.end

diff  --git a/llvm/test/CodeGen/X86/tail-merge-after-mbp.mir b/llvm/test/CodeGen/X86/tail-merge-after-mbp.mir
index bf9b0decc9df..1fbea33cbcbc 100644
--- a/llvm/test/CodeGen/X86/tail-merge-after-mbp.mir
+++ b/llvm/test/CodeGen/X86/tail-merge-after-mbp.mir
@@ -18,30 +18,30 @@ body:             |
   ; CHECK:   TEST64rr $rax, $rax, implicit-def $eflags
   ; CHECK:   JCC_1 %bb.1, 4, implicit $eflags
   ; CHECK: bb.3:
-  ; CHECK:   successors: %bb.4(0x30000000), %bb.8(0x50000000)
+  ; CHECK:   successors: %bb.6(0x30000000), %bb.4(0x50000000)
   ; CHECK:   CMP64mi8 killed $rax, 1, $noreg, 8, $noreg, 0, implicit-def $eflags :: (load 8)
-  ; CHECK:   JCC_1 %bb.8, 5, implicit $eflags
+  ; CHECK:   JCC_1 %bb.6, 4, implicit $eflags
   ; CHECK: bb.4:
-  ; CHECK:   successors: %bb.1(0x30000000), %bb.5(0x50000000)
+  ; CHECK:   $ebp = XOR32rr undef $ebp, undef $ebp, implicit-def dead $eflags
+  ; CHECK:   dead $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags, implicit-def $al
+  ; CHECK:   RETQ $eax
+  ; CHECK: bb.6:
+  ; CHECK:   successors: %bb.1(0x30000000), %bb.7(0x50000000)
   ; CHECK:   $rax = MOV64rm $r14, 1, $noreg, 0, $noreg :: (load 8)
   ; CHECK:   TEST64rr $rax, $rax, implicit-def $eflags
   ; CHECK:   JCC_1 %bb.1, 4, implicit $eflags
-  ; CHECK: bb.5 (align 16):
-  ; CHECK:   successors: %bb.6(0x71555555), %bb.8(0x0eaaaaab)
+  ; CHECK: bb.7 (align 16):
+  ; CHECK:   successors: %bb.8(0x71555555), %bb.4(0x0eaaaaab)
   ; CHECK:   CMP64mi8 killed $rax, 1, $noreg, 8, $noreg, 0, implicit-def $eflags :: (load 8), (load 8)
-  ; CHECK:   JCC_1 %bb.8, 5, implicit $eflags
-  ; CHECK: bb.6:
-  ; CHECK:   successors: %bb.1(0x04000000), %bb.5(0x7c000000)
+  ; CHECK:   JCC_1 %bb.4, 5, implicit $eflags
+  ; CHECK: bb.8:
+  ; CHECK:   successors: %bb.1(0x04000000), %bb.7(0x7c000000)
   ; CHECK:   $rax = MOV64rm $r14, 1, $noreg, 0, $noreg :: (load 8)
   ; CHECK:   TEST64rr $rax, $rax, implicit-def $eflags
-  ; CHECK:   JCC_1 %bb.5, 5, implicit $eflags
+  ; CHECK:   JCC_1 %bb.7, 5, implicit $eflags
   ; CHECK: bb.1:
   ; CHECK:   $ebp = XOR32rr undef $ebp, undef $ebp, implicit-def dead $eflags
   ; CHECK:   RETQ $eax
-  ; CHECK: bb.8:
-  ; CHECK:   $ebp = XOR32rr undef $ebp, undef $ebp, implicit-def dead $eflags
-  ; CHECK:   dead $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags, implicit-def $al
-  ; CHECK:   RETQ $eax
   bb.0:
     successors: %bb.1(0x40000000), %bb.7(0x40000000)
 

diff  --git a/llvm/test/CodeGen/X86/tail-opts.ll b/llvm/test/CodeGen/X86/tail-opts.ll
index 63189af6aeef..27d50dfb0430 100644
--- a/llvm/test/CodeGen/X86/tail-opts.ll
+++ b/llvm/test/CodeGen/X86/tail-opts.ll
@@ -242,75 +242,68 @@ define fastcc void @c_expand_expr_stmt(%union.tree_node* %expr) nounwind {
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    testb %al, %al
-; CHECK-NEXT:    jne .LBB3_17
+; CHECK-NEXT:    jne .LBB3_9
 ; CHECK-NEXT:  # %bb.1: # %entry
 ; CHECK-NEXT:    movb 0, %bl
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    testb %al, %al
-; CHECK-NEXT:    jne .LBB3_16
+; CHECK-NEXT:    jne .LBB3_8
 ; CHECK-NEXT:  # %bb.2: # %bb.i
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    testb %al, %al
-; CHECK-NEXT:    je .LBB3_16
+; CHECK-NEXT:    je .LBB3_8
 ; CHECK-NEXT:  # %bb.3: # %lvalue_p.exit
 ; CHECK-NEXT:    movq 0, %rax
 ; CHECK-NEXT:    movzbl (%rax), %ecx
 ; CHECK-NEXT:    testl %ecx, %ecx
-; CHECK-NEXT:    je .LBB3_12
+; CHECK-NEXT:    je .LBB3_10
 ; CHECK-NEXT:  # %bb.4: # %lvalue_p.exit
 ; CHECK-NEXT:    cmpl $2, %ecx
-; CHECK-NEXT:    jne .LBB3_5
-; CHECK-NEXT:  # %bb.6: # %bb.i1
+; CHECK-NEXT:    jne .LBB3_15
+; CHECK-NEXT:  # %bb.5: # %bb.i1
 ; CHECK-NEXT:    movq 32(%rax), %rax
 ; CHECK-NEXT:    movzbl 16(%rax), %ecx
 ; CHECK-NEXT:    testl %ecx, %ecx
-; CHECK-NEXT:    je .LBB3_10
-; CHECK-NEXT:  # %bb.7: # %bb.i1
+; CHECK-NEXT:    je .LBB3_13
+; CHECK-NEXT:  # %bb.6: # %bb.i1
 ; CHECK-NEXT:    cmpl $2, %ecx
-; CHECK-NEXT:    jne .LBB3_8
-; CHECK-NEXT:  # %bb.9: # %bb.i.i
+; CHECK-NEXT:    jne .LBB3_15
+; CHECK-NEXT:  # %bb.7: # %bb.i.i
 ; CHECK-NEXT:    xorl %edi, %edi
 ; CHECK-NEXT:    callq lvalue_p
 ; CHECK-NEXT:    testl %eax, %eax
 ; CHECK-NEXT:    setne %al
-; CHECK-NEXT:    testb %al, %al
-; CHECK-NEXT:    je .LBB3_15
-; CHECK-NEXT:    jmp .LBB3_17
-; CHECK-NEXT:  .LBB3_16: # %bb1
+; CHECK-NEXT:    jmp .LBB3_16
+; CHECK-NEXT:  .LBB3_8: # %bb1
 ; CHECK-NEXT:    cmpb $23, %bl
-; CHECK-NEXT:  .LBB3_17: # %bb3
-; CHECK-NEXT:  .LBB3_12: # %bb2.i3
+; CHECK-NEXT:  .LBB3_9: # %bb3
+; CHECK-NEXT:  .LBB3_15:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:  .LBB3_16: # %lvalue_p.exit4
+; CHECK-NEXT:    testb %al, %al
+; CHECK-NEXT:    jne .LBB3_9
+; CHECK-NEXT:  # %bb.17: # %lvalue_p.exit4
+; CHECK-NEXT:    testb %bl, %bl
+; CHECK-NEXT:  .LBB3_10: # %bb2.i3
 ; CHECK-NEXT:    movq 8(%rax), %rax
 ; CHECK-NEXT:    movb 16(%rax), %cl
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpb $23, %cl
-; CHECK-NEXT:    je .LBB3_14
-; CHECK-NEXT:  # %bb.13: # %bb2.i3
+; CHECK-NEXT:    je .LBB3_16
+; CHECK-NEXT:  # %bb.11: # %bb2.i3
 ; CHECK-NEXT:    cmpb $16, %cl
-; CHECK-NEXT:    je .LBB3_14
-; CHECK-NEXT:    jmp .LBB3_17
-; CHECK-NEXT:  .LBB3_5:
-; CHECK-NEXT:    xorl %eax, %eax
-; CHECK-NEXT:    testb %al, %al
-; CHECK-NEXT:    je .LBB3_15
-; CHECK-NEXT:    jmp .LBB3_17
-; CHECK-NEXT:  .LBB3_10: # %bb2.i.i2
+; CHECK-NEXT:    je .LBB3_16
+; CHECK-NEXT:    jmp .LBB3_9
+; CHECK-NEXT:  .LBB3_13: # %bb2.i.i2
 ; CHECK-NEXT:    movq 8(%rax), %rax
 ; CHECK-NEXT:    movb 16(%rax), %cl
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpb $16, %cl
-; CHECK-NEXT:    je .LBB3_14
-; CHECK-NEXT:  # %bb.11: # %bb2.i.i2
+; CHECK-NEXT:    je .LBB3_16
+; CHECK-NEXT:  # %bb.14: # %bb2.i.i2
 ; CHECK-NEXT:    cmpb $23, %cl
-; CHECK-NEXT:    je .LBB3_14
-; CHECK-NEXT:    jmp .LBB3_17
-; CHECK-NEXT:  .LBB3_8:
-; CHECK-NEXT:    xorl %eax, %eax
-; CHECK-NEXT:  .LBB3_14: # %lvalue_p.exit4
-; CHECK-NEXT:    testb %al, %al
-; CHECK-NEXT:    jne .LBB3_17
-; CHECK-NEXT:  .LBB3_15: # %lvalue_p.exit4
-; CHECK-NEXT:    testb %bl, %bl
+; CHECK-NEXT:    je .LBB3_16
+; CHECK-NEXT:    jmp .LBB3_9
 entry:
   %tmp4 = load i8, i8* null, align 8                  ; <i8> [#uses=3]
   switch i8 %tmp4, label %bb3 [


        


More information about the llvm-commits mailing list