[llvm] 27bc30c - [RISCV] Add test case to show a case where (mul (and X, 0xffffffff), (and Y, 0xffffffff)) optimization does not improve code.

Craig Topper via llvm-commits <llvm-commits@lists.llvm.org>
Sat Mar 20 17:57:18 PDT 2021


Author: Craig Topper
Date: 2021-03-20T17:54:28-07:00
New Revision: 27bc30c39d62f50fd762a8bcc2dcb0401e7263f7

URL: https://github.com/llvm/llvm-project/commit/27bc30c39d62f50fd762a8bcc2dcb0401e7263f7
DIFF: https://github.com/llvm/llvm-project/commit/27bc30c39d62f50fd762a8bcc2dcb0401e7263f7.diff

LOG: [RISCV] Add test case to show a case where (mul (and X, 0xffffffff), (and Y, 0xffffffff)) optimization does not improve code.

If the mul has two users, one of which is a sext.w, the mul
will also be selected to a MULW before our pattern runs. This
causes the ANDs to be used by both the already selected MULW
and the mul we still need to select. They are unneeded on the
MULW since MULW only reads the lower 32 bits, but they still
get selected to SLLI+SRLI for the MULW use. The use for the
(mul (and X, 0xffffffff), (and Y, 0xffffffff)) manages to reuse
the SLLI.

The end result is increased register pressure and no improvement
in how soon we can start the MULW.
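
For reference, a minimal LLVM IR sketch of the problematic shape
(hypothetical function and value names, not part of this commit): the
legalized form of llvm.umul.with.overflow.i32 on RV64 multiplies two
zero-extended operands, and the 64-bit product feeds both a sext.w-style
user (the low 32-bit result) and a shift-right-by-32 user (the overflow
check).

    ; Hypothetical reduction. %m has two users: the trunc+sext chain is
    ; selected to a MULW early, so the ANDs feeding the remaining
    ; (mul (and X, 0xffffffff), (and Y, 0xffffffff)) get materialized
    ; as SLLI+SRLI instead of being folded away.
    define i64 @mul_two_users(i64 %x, i64 %y) {
      %ax = and i64 %x, 4294967295   ; zero-extend the low 32 bits of %x
      %ay = and i64 %y, 4294967295   ; zero-extend the low 32 bits of %y
      %m  = mul i64 %ax, %ay         ; full 64-bit product, two users below
      %lo = trunc i64 %m to i32
      %sx = sext i32 %lo to i64      ; sext.w user of the product
      %hi = lshr i64 %m, 32          ; overflow bits still need the wide mul
      %r  = add i64 %sx, %hi
      ret i64 %r
    }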

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/xaluo.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/xaluo.ll b/llvm/test/CodeGen/RISCV/xaluo.ll
index f34093e8d6f3..b535fd93be76 100644
--- a/llvm/test/CodeGen/RISCV/xaluo.ll
+++ b/llvm/test/CodeGen/RISCV/xaluo.ll
@@ -1031,6 +1031,57 @@ entry:
   ret i1 %obit
 }
 
+; Similar to umulo.i32, but storing the overflow and returning the result.
+define signext i32 @umulo3.i32(i32 signext %0, i32 signext %1, i32* %2) {
+; RV32-LABEL: umulo3.i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    mul a3, a0, a1
+; RV32-NEXT:    mulhu a0, a0, a1
+; RV32-NEXT:    snez a0, a0
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: umulo3.i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    slli a1, a1, 32
+; RV64-NEXT:    srli a3, a1, 32
+; RV64-NEXT:    slli a0, a0, 32
+; RV64-NEXT:    srli a4, a0, 32
+; RV64-NEXT:    mulhu a0, a0, a1
+; RV64-NEXT:    srli a0, a0, 32
+; RV64-NEXT:    snez a1, a0
+; RV64-NEXT:    mulw a0, a4, a3
+; RV64-NEXT:    sw a1, 0(a2)
+; RV64-NEXT:    ret
+;
+; RV32ZBA-LABEL: umulo3.i32:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    mul a3, a0, a1
+; RV32ZBA-NEXT:    mulhu a0, a0, a1
+; RV32ZBA-NEXT:    snez a0, a0
+; RV32ZBA-NEXT:    sw a0, 0(a2)
+; RV32ZBA-NEXT:    mv a0, a3
+; RV32ZBA-NEXT:    ret
+;
+; RV64ZBA-LABEL: umulo3.i32:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    zext.w a1, a1
+; RV64ZBA-NEXT:    zext.w a0, a0
+; RV64ZBA-NEXT:    mul a3, a0, a1
+; RV64ZBA-NEXT:    srli a3, a3, 32
+; RV64ZBA-NEXT:    snez a3, a3
+; RV64ZBA-NEXT:    mulw a0, a0, a1
+; RV64ZBA-NEXT:    sw a3, 0(a2)
+; RV64ZBA-NEXT:    ret
+  %4 = tail call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %0, i32 %1)
+  %5 = extractvalue { i32, i1 } %4, 1
+  %6 = extractvalue { i32, i1 } %4, 0
+  %7 = zext i1 %5 to i32
+  store i32 %7, i32* %2, align 4
+  ret i32 %6
+}
+
 define zeroext i1 @umulo.i64(i64 %v1, i64 %v2, i64* %res) {
 ; RV32-LABEL: umulo.i64:
 ; RV32:       # %bb.0: # %entry
@@ -1175,10 +1226,10 @@ define i32 @saddo.select.i32(i32 %v1, i32 %v2) {
 ; RV32-NEXT:    add a2, a0, a1
 ; RV32-NEXT:    slt a2, a2, a0
 ; RV32-NEXT:    slti a3, a1, 0
-; RV32-NEXT:    bne a3, a2, .LBB22_2
+; RV32-NEXT:    bne a3, a2, .LBB23_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    mv a0, a1
-; RV32-NEXT:  .LBB22_2: # %entry
+; RV32-NEXT:  .LBB23_2: # %entry
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: saddo.select.i32:
@@ -1187,10 +1238,10 @@ define i32 @saddo.select.i32(i32 %v1, i32 %v2) {
 ; RV64-NEXT:    sext.w a3, a0
 ; RV64-NEXT:    add a4, a3, a2
 ; RV64-NEXT:    addw a2, a3, a2
-; RV64-NEXT:    bne a2, a4, .LBB22_2
+; RV64-NEXT:    bne a2, a4, .LBB23_2
 ; RV64-NEXT:  # %bb.1: # %entry
 ; RV64-NEXT:    mv a0, a1
-; RV64-NEXT:  .LBB22_2: # %entry
+; RV64-NEXT:  .LBB23_2: # %entry
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: saddo.select.i32:
@@ -1198,10 +1249,10 @@ define i32 @saddo.select.i32(i32 %v1, i32 %v2) {
 ; RV32ZBA-NEXT:    add a2, a0, a1
 ; RV32ZBA-NEXT:    slt a2, a2, a0
 ; RV32ZBA-NEXT:    slti a3, a1, 0
-; RV32ZBA-NEXT:    bne a3, a2, .LBB22_2
+; RV32ZBA-NEXT:    bne a3, a2, .LBB23_2
 ; RV32ZBA-NEXT:  # %bb.1: # %entry
 ; RV32ZBA-NEXT:    mv a0, a1
-; RV32ZBA-NEXT:  .LBB22_2: # %entry
+; RV32ZBA-NEXT:  .LBB23_2: # %entry
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: saddo.select.i32:
@@ -1210,10 +1261,10 @@ define i32 @saddo.select.i32(i32 %v1, i32 %v2) {
 ; RV64ZBA-NEXT:    sext.w a3, a0
 ; RV64ZBA-NEXT:    add a4, a3, a2
 ; RV64ZBA-NEXT:    addw a2, a3, a2
-; RV64ZBA-NEXT:    bne a2, a4, .LBB22_2
+; RV64ZBA-NEXT:    bne a2, a4, .LBB23_2
 ; RV64ZBA-NEXT:  # %bb.1: # %entry
 ; RV64ZBA-NEXT:    mv a0, a1
-; RV64ZBA-NEXT:  .LBB22_2: # %entry
+; RV64ZBA-NEXT:  .LBB23_2: # %entry
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
@@ -1278,11 +1329,11 @@ define i64 @saddo.select.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:    xor a5, a1, a3
 ; RV32-NEXT:    not a5, a5
 ; RV32-NEXT:    and a4, a5, a4
-; RV32-NEXT:    bltz a4, .LBB24_2
+; RV32-NEXT:    bltz a4, .LBB25_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    mv a0, a2
 ; RV32-NEXT:    mv a1, a3
-; RV32-NEXT:  .LBB24_2: # %entry
+; RV32-NEXT:  .LBB25_2: # %entry
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: saddo.select.i64:
@@ -1290,10 +1341,10 @@ define i64 @saddo.select.i64(i64 %v1, i64 %v2) {
 ; RV64-NEXT:    add a2, a0, a1
 ; RV64-NEXT:    slt a2, a2, a0
 ; RV64-NEXT:    slti a3, a1, 0
-; RV64-NEXT:    bne a3, a2, .LBB24_2
+; RV64-NEXT:    bne a3, a2, .LBB25_2
 ; RV64-NEXT:  # %bb.1: # %entry
 ; RV64-NEXT:    mv a0, a1
-; RV64-NEXT:  .LBB24_2: # %entry
+; RV64-NEXT:  .LBB25_2: # %entry
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: saddo.select.i64:
@@ -1306,11 +1357,11 @@ define i64 @saddo.select.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:    xor a5, a1, a3
 ; RV32ZBA-NEXT:    not a5, a5
 ; RV32ZBA-NEXT:    and a4, a5, a4
-; RV32ZBA-NEXT:    bltz a4, .LBB24_2
+; RV32ZBA-NEXT:    bltz a4, .LBB25_2
 ; RV32ZBA-NEXT:  # %bb.1: # %entry
 ; RV32ZBA-NEXT:    mv a0, a2
 ; RV32ZBA-NEXT:    mv a1, a3
-; RV32ZBA-NEXT:  .LBB24_2: # %entry
+; RV32ZBA-NEXT:  .LBB25_2: # %entry
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: saddo.select.i64:
@@ -1318,10 +1369,10 @@ define i64 @saddo.select.i64(i64 %v1, i64 %v2) {
 ; RV64ZBA-NEXT:    add a2, a0, a1
 ; RV64ZBA-NEXT:    slt a2, a2, a0
 ; RV64ZBA-NEXT:    slti a3, a1, 0
-; RV64ZBA-NEXT:    bne a3, a2, .LBB24_2
+; RV64ZBA-NEXT:    bne a3, a2, .LBB25_2
 ; RV64ZBA-NEXT:  # %bb.1: # %entry
 ; RV64ZBA-NEXT:    mv a0, a1
-; RV64ZBA-NEXT:  .LBB24_2: # %entry
+; RV64ZBA-NEXT:  .LBB25_2: # %entry
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
@@ -1387,39 +1438,39 @@ define i32 @uaddo.select.i32(i32 %v1, i32 %v2) {
 ; RV32-LABEL: uaddo.select.i32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    add a2, a0, a1
-; RV32-NEXT:    bltu a2, a0, .LBB26_2
+; RV32-NEXT:    bltu a2, a0, .LBB27_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    mv a0, a1
-; RV32-NEXT:  .LBB26_2: # %entry
+; RV32-NEXT:  .LBB27_2: # %entry
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: uaddo.select.i32:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    addw a2, a0, a1
 ; RV64-NEXT:    sext.w a3, a0
-; RV64-NEXT:    bltu a2, a3, .LBB26_2
+; RV64-NEXT:    bltu a2, a3, .LBB27_2
 ; RV64-NEXT:  # %bb.1: # %entry
 ; RV64-NEXT:    mv a0, a1
-; RV64-NEXT:  .LBB26_2: # %entry
+; RV64-NEXT:  .LBB27_2: # %entry
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: uaddo.select.i32:
 ; RV32ZBA:       # %bb.0: # %entry
 ; RV32ZBA-NEXT:    add a2, a0, a1
-; RV32ZBA-NEXT:    bltu a2, a0, .LBB26_2
+; RV32ZBA-NEXT:    bltu a2, a0, .LBB27_2
 ; RV32ZBA-NEXT:  # %bb.1: # %entry
 ; RV32ZBA-NEXT:    mv a0, a1
-; RV32ZBA-NEXT:  .LBB26_2: # %entry
+; RV32ZBA-NEXT:  .LBB27_2: # %entry
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: uaddo.select.i32:
 ; RV64ZBA:       # %bb.0: # %entry
 ; RV64ZBA-NEXT:    addw a2, a0, a1
 ; RV64ZBA-NEXT:    sext.w a3, a0
-; RV64ZBA-NEXT:    bltu a2, a3, .LBB26_2
+; RV64ZBA-NEXT:    bltu a2, a3, .LBB27_2
 ; RV64ZBA-NEXT:  # %bb.1: # %entry
 ; RV64ZBA-NEXT:    mv a0, a1
-; RV64ZBA-NEXT:  .LBB26_2: # %entry
+; RV64ZBA-NEXT:  .LBB27_2: # %entry
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
@@ -1472,15 +1523,15 @@ define i64 @uaddo.select.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:    add a4, a0, a2
 ; RV32-NEXT:    sltu a4, a4, a0
 ; RV32-NEXT:    add a5, a5, a4
-; RV32-NEXT:    bne a5, a1, .LBB28_3
+; RV32-NEXT:    bne a5, a1, .LBB29_3
 ; RV32-NEXT:  # %bb.1: # %entry
-; RV32-NEXT:    beqz a4, .LBB28_4
-; RV32-NEXT:  .LBB28_2: # %entry
+; RV32-NEXT:    beqz a4, .LBB29_4
+; RV32-NEXT:  .LBB29_2: # %entry
 ; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB28_3: # %entry
+; RV32-NEXT:  .LBB29_3: # %entry
 ; RV32-NEXT:    sltu a4, a5, a1
-; RV32-NEXT:    bnez a4, .LBB28_2
-; RV32-NEXT:  .LBB28_4: # %entry
+; RV32-NEXT:    bnez a4, .LBB29_2
+; RV32-NEXT:  .LBB29_4: # %entry
 ; RV32-NEXT:    mv a0, a2
 ; RV32-NEXT:    mv a1, a3
 ; RV32-NEXT:    ret
@@ -1488,10 +1539,10 @@ define i64 @uaddo.select.i64(i64 %v1, i64 %v2) {
 ; RV64-LABEL: uaddo.select.i64:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    add a2, a0, a1
-; RV64-NEXT:    bltu a2, a0, .LBB28_2
+; RV64-NEXT:    bltu a2, a0, .LBB29_2
 ; RV64-NEXT:  # %bb.1: # %entry
 ; RV64-NEXT:    mv a0, a1
-; RV64-NEXT:  .LBB28_2: # %entry
+; RV64-NEXT:  .LBB29_2: # %entry
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: uaddo.select.i64:
@@ -1500,15 +1551,15 @@ define i64 @uaddo.select.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:    add a4, a0, a2
 ; RV32ZBA-NEXT:    sltu a4, a4, a0
 ; RV32ZBA-NEXT:    add a5, a5, a4
-; RV32ZBA-NEXT:    bne a5, a1, .LBB28_3
+; RV32ZBA-NEXT:    bne a5, a1, .LBB29_3
 ; RV32ZBA-NEXT:  # %bb.1: # %entry
-; RV32ZBA-NEXT:    beqz a4, .LBB28_4
-; RV32ZBA-NEXT:  .LBB28_2: # %entry
+; RV32ZBA-NEXT:    beqz a4, .LBB29_4
+; RV32ZBA-NEXT:  .LBB29_2: # %entry
 ; RV32ZBA-NEXT:    ret
-; RV32ZBA-NEXT:  .LBB28_3: # %entry
+; RV32ZBA-NEXT:  .LBB29_3: # %entry
 ; RV32ZBA-NEXT:    sltu a4, a5, a1
-; RV32ZBA-NEXT:    bnez a4, .LBB28_2
-; RV32ZBA-NEXT:  .LBB28_4: # %entry
+; RV32ZBA-NEXT:    bnez a4, .LBB29_2
+; RV32ZBA-NEXT:  .LBB29_4: # %entry
 ; RV32ZBA-NEXT:    mv a0, a2
 ; RV32ZBA-NEXT:    mv a1, a3
 ; RV32ZBA-NEXT:    ret
@@ -1516,10 +1567,10 @@ define i64 @uaddo.select.i64(i64 %v1, i64 %v2) {
 ; RV64ZBA-LABEL: uaddo.select.i64:
 ; RV64ZBA:       # %bb.0: # %entry
 ; RV64ZBA-NEXT:    add a2, a0, a1
-; RV64ZBA-NEXT:    bltu a2, a0, .LBB28_2
+; RV64ZBA-NEXT:    bltu a2, a0, .LBB29_2
 ; RV64ZBA-NEXT:  # %bb.1: # %entry
 ; RV64ZBA-NEXT:    mv a0, a1
-; RV64ZBA-NEXT:  .LBB28_2: # %entry
+; RV64ZBA-NEXT:  .LBB29_2: # %entry
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
@@ -1535,10 +1586,10 @@ define i1 @uaddo.not.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:    add a2, a0, a2
 ; RV32-NEXT:    sltu a0, a2, a0
 ; RV32-NEXT:    add a2, a3, a0
-; RV32-NEXT:    beq a2, a1, .LBB29_2
+; RV32-NEXT:    beq a2, a1, .LBB30_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    sltu a0, a2, a1
-; RV32-NEXT:  .LBB29_2: # %entry
+; RV32-NEXT:  .LBB30_2: # %entry
 ; RV32-NEXT:    xori a0, a0, 1
 ; RV32-NEXT:    ret
 ;
@@ -1555,10 +1606,10 @@ define i1 @uaddo.not.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:    add a2, a0, a2
 ; RV32ZBA-NEXT:    sltu a0, a2, a0
 ; RV32ZBA-NEXT:    add a2, a3, a0
-; RV32ZBA-NEXT:    beq a2, a1, .LBB29_2
+; RV32ZBA-NEXT:    beq a2, a1, .LBB30_2
 ; RV32ZBA-NEXT:  # %bb.1: # %entry
 ; RV32ZBA-NEXT:    sltu a0, a2, a1
-; RV32ZBA-NEXT:  .LBB29_2: # %entry
+; RV32ZBA-NEXT:  .LBB30_2: # %entry
 ; RV32ZBA-NEXT:    xori a0, a0, 1
 ; RV32ZBA-NEXT:    ret
 ;
@@ -1581,10 +1632,10 @@ define i32 @ssubo.select.i32(i32 %v1, i32 %v2) {
 ; RV32-NEXT:    sgtz a2, a1
 ; RV32-NEXT:    sub a3, a0, a1
 ; RV32-NEXT:    slt a3, a3, a0
-; RV32-NEXT:    bne a2, a3, .LBB30_2
+; RV32-NEXT:    bne a2, a3, .LBB31_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    mv a0, a1
-; RV32-NEXT:  .LBB30_2: # %entry
+; RV32-NEXT:  .LBB31_2: # %entry
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: ssubo.select.i32:
@@ -1593,10 +1644,10 @@ define i32 @ssubo.select.i32(i32 %v1, i32 %v2) {
 ; RV64-NEXT:    sext.w a3, a0
 ; RV64-NEXT:    sub a4, a3, a2
 ; RV64-NEXT:    subw a2, a3, a2
-; RV64-NEXT:    bne a2, a4, .LBB30_2
+; RV64-NEXT:    bne a2, a4, .LBB31_2
 ; RV64-NEXT:  # %bb.1: # %entry
 ; RV64-NEXT:    mv a0, a1
-; RV64-NEXT:  .LBB30_2: # %entry
+; RV64-NEXT:  .LBB31_2: # %entry
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: ssubo.select.i32:
@@ -1604,10 +1655,10 @@ define i32 @ssubo.select.i32(i32 %v1, i32 %v2) {
 ; RV32ZBA-NEXT:    sgtz a2, a1
 ; RV32ZBA-NEXT:    sub a3, a0, a1
 ; RV32ZBA-NEXT:    slt a3, a3, a0
-; RV32ZBA-NEXT:    bne a2, a3, .LBB30_2
+; RV32ZBA-NEXT:    bne a2, a3, .LBB31_2
 ; RV32ZBA-NEXT:  # %bb.1: # %entry
 ; RV32ZBA-NEXT:    mv a0, a1
-; RV32ZBA-NEXT:  .LBB30_2: # %entry
+; RV32ZBA-NEXT:  .LBB31_2: # %entry
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: ssubo.select.i32:
@@ -1616,10 +1667,10 @@ define i32 @ssubo.select.i32(i32 %v1, i32 %v2) {
 ; RV64ZBA-NEXT:    sext.w a3, a0
 ; RV64ZBA-NEXT:    sub a4, a3, a2
 ; RV64ZBA-NEXT:    subw a2, a3, a2
-; RV64ZBA-NEXT:    bne a2, a4, .LBB30_2
+; RV64ZBA-NEXT:    bne a2, a4, .LBB31_2
 ; RV64ZBA-NEXT:  # %bb.1: # %entry
 ; RV64ZBA-NEXT:    mv a0, a1
-; RV64ZBA-NEXT:  .LBB30_2: # %entry
+; RV64ZBA-NEXT:  .LBB31_2: # %entry
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
@@ -1682,11 +1733,11 @@ define i64 @ssubo.select.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:    xor a4, a1, a4
 ; RV32-NEXT:    xor a5, a1, a3
 ; RV32-NEXT:    and a4, a5, a4
-; RV32-NEXT:    bltz a4, .LBB32_2
+; RV32-NEXT:    bltz a4, .LBB33_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    mv a0, a2
 ; RV32-NEXT:    mv a1, a3
-; RV32-NEXT:  .LBB32_2: # %entry
+; RV32-NEXT:  .LBB33_2: # %entry
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: ssubo.select.i64:
@@ -1694,10 +1745,10 @@ define i64 @ssubo.select.i64(i64 %v1, i64 %v2) {
 ; RV64-NEXT:    sgtz a2, a1
 ; RV64-NEXT:    sub a3, a0, a1
 ; RV64-NEXT:    slt a3, a3, a0
-; RV64-NEXT:    bne a2, a3, .LBB32_2
+; RV64-NEXT:    bne a2, a3, .LBB33_2
 ; RV64-NEXT:  # %bb.1: # %entry
 ; RV64-NEXT:    mv a0, a1
-; RV64-NEXT:  .LBB32_2: # %entry
+; RV64-NEXT:  .LBB33_2: # %entry
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: ssubo.select.i64:
@@ -1708,11 +1759,11 @@ define i64 @ssubo.select.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:    xor a4, a1, a4
 ; RV32ZBA-NEXT:    xor a5, a1, a3
 ; RV32ZBA-NEXT:    and a4, a5, a4
-; RV32ZBA-NEXT:    bltz a4, .LBB32_2
+; RV32ZBA-NEXT:    bltz a4, .LBB33_2
 ; RV32ZBA-NEXT:  # %bb.1: # %entry
 ; RV32ZBA-NEXT:    mv a0, a2
 ; RV32ZBA-NEXT:    mv a1, a3
-; RV32ZBA-NEXT:  .LBB32_2: # %entry
+; RV32ZBA-NEXT:  .LBB33_2: # %entry
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: ssubo.select.i64:
@@ -1720,10 +1771,10 @@ define i64 @ssubo.select.i64(i64 %v1, i64 %v2) {
 ; RV64ZBA-NEXT:    sgtz a2, a1
 ; RV64ZBA-NEXT:    sub a3, a0, a1
 ; RV64ZBA-NEXT:    slt a3, a3, a0
-; RV64ZBA-NEXT:    bne a2, a3, .LBB32_2
+; RV64ZBA-NEXT:    bne a2, a3, .LBB33_2
 ; RV64ZBA-NEXT:  # %bb.1: # %entry
 ; RV64ZBA-NEXT:    mv a0, a1
-; RV64ZBA-NEXT:  .LBB32_2: # %entry
+; RV64ZBA-NEXT:  .LBB33_2: # %entry
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
@@ -1785,39 +1836,39 @@ define i32 @usubo.select.i32(i32 %v1, i32 %v2) {
 ; RV32-LABEL: usubo.select.i32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    sub a2, a0, a1
-; RV32-NEXT:    bltu a0, a2, .LBB34_2
+; RV32-NEXT:    bltu a0, a2, .LBB35_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    mv a0, a1
-; RV32-NEXT:  .LBB34_2: # %entry
+; RV32-NEXT:  .LBB35_2: # %entry
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: usubo.select.i32:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    subw a2, a0, a1
 ; RV64-NEXT:    sext.w a3, a0
-; RV64-NEXT:    bltu a3, a2, .LBB34_2
+; RV64-NEXT:    bltu a3, a2, .LBB35_2
 ; RV64-NEXT:  # %bb.1: # %entry
 ; RV64-NEXT:    mv a0, a1
-; RV64-NEXT:  .LBB34_2: # %entry
+; RV64-NEXT:  .LBB35_2: # %entry
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: usubo.select.i32:
 ; RV32ZBA:       # %bb.0: # %entry
 ; RV32ZBA-NEXT:    sub a2, a0, a1
-; RV32ZBA-NEXT:    bltu a0, a2, .LBB34_2
+; RV32ZBA-NEXT:    bltu a0, a2, .LBB35_2
 ; RV32ZBA-NEXT:  # %bb.1: # %entry
 ; RV32ZBA-NEXT:    mv a0, a1
-; RV32ZBA-NEXT:  .LBB34_2: # %entry
+; RV32ZBA-NEXT:  .LBB35_2: # %entry
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: usubo.select.i32:
 ; RV64ZBA:       # %bb.0: # %entry
 ; RV64ZBA-NEXT:    subw a2, a0, a1
 ; RV64ZBA-NEXT:    sext.w a3, a0
-; RV64ZBA-NEXT:    bltu a3, a2, .LBB34_2
+; RV64ZBA-NEXT:    bltu a3, a2, .LBB35_2
 ; RV64ZBA-NEXT:  # %bb.1: # %entry
 ; RV64ZBA-NEXT:    mv a0, a1
-; RV64ZBA-NEXT:  .LBB34_2: # %entry
+; RV64ZBA-NEXT:  .LBB35_2: # %entry
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
@@ -1869,28 +1920,28 @@ define i64 @usubo.select.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:    sltu a4, a0, a2
 ; RV32-NEXT:    sub a5, a1, a3
 ; RV32-NEXT:    sub a4, a5, a4
-; RV32-NEXT:    beq a4, a1, .LBB36_2
+; RV32-NEXT:    beq a4, a1, .LBB37_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    sltu a4, a1, a4
-; RV32-NEXT:    beqz a4, .LBB36_3
-; RV32-NEXT:    j .LBB36_4
-; RV32-NEXT:  .LBB36_2:
+; RV32-NEXT:    beqz a4, .LBB37_3
+; RV32-NEXT:    j .LBB37_4
+; RV32-NEXT:  .LBB37_2:
 ; RV32-NEXT:    sub a4, a0, a2
 ; RV32-NEXT:    sltu a4, a0, a4
-; RV32-NEXT:    bnez a4, .LBB36_4
-; RV32-NEXT:  .LBB36_3: # %entry
+; RV32-NEXT:    bnez a4, .LBB37_4
+; RV32-NEXT:  .LBB37_3: # %entry
 ; RV32-NEXT:    mv a0, a2
 ; RV32-NEXT:    mv a1, a3
-; RV32-NEXT:  .LBB36_4: # %entry
+; RV32-NEXT:  .LBB37_4: # %entry
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: usubo.select.i64:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    sub a2, a0, a1
-; RV64-NEXT:    bltu a0, a2, .LBB36_2
+; RV64-NEXT:    bltu a0, a2, .LBB37_2
 ; RV64-NEXT:  # %bb.1: # %entry
 ; RV64-NEXT:    mv a0, a1
-; RV64-NEXT:  .LBB36_2: # %entry
+; RV64-NEXT:  .LBB37_2: # %entry
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: usubo.select.i64:
@@ -1898,28 +1949,28 @@ define i64 @usubo.select.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:    sltu a4, a0, a2
 ; RV32ZBA-NEXT:    sub a5, a1, a3
 ; RV32ZBA-NEXT:    sub a4, a5, a4
-; RV32ZBA-NEXT:    beq a4, a1, .LBB36_2
+; RV32ZBA-NEXT:    beq a4, a1, .LBB37_2
 ; RV32ZBA-NEXT:  # %bb.1: # %entry
 ; RV32ZBA-NEXT:    sltu a4, a1, a4
-; RV32ZBA-NEXT:    beqz a4, .LBB36_3
-; RV32ZBA-NEXT:    j .LBB36_4
-; RV32ZBA-NEXT:  .LBB36_2:
+; RV32ZBA-NEXT:    beqz a4, .LBB37_3
+; RV32ZBA-NEXT:    j .LBB37_4
+; RV32ZBA-NEXT:  .LBB37_2:
 ; RV32ZBA-NEXT:    sub a4, a0, a2
 ; RV32ZBA-NEXT:    sltu a4, a0, a4
-; RV32ZBA-NEXT:    bnez a4, .LBB36_4
-; RV32ZBA-NEXT:  .LBB36_3: # %entry
+; RV32ZBA-NEXT:    bnez a4, .LBB37_4
+; RV32ZBA-NEXT:  .LBB37_3: # %entry
 ; RV32ZBA-NEXT:    mv a0, a2
 ; RV32ZBA-NEXT:    mv a1, a3
-; RV32ZBA-NEXT:  .LBB36_4: # %entry
+; RV32ZBA-NEXT:  .LBB37_4: # %entry
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: usubo.select.i64:
 ; RV64ZBA:       # %bb.0: # %entry
 ; RV64ZBA-NEXT:    sub a2, a0, a1
-; RV64ZBA-NEXT:    bltu a0, a2, .LBB36_2
+; RV64ZBA-NEXT:    bltu a0, a2, .LBB37_2
 ; RV64ZBA-NEXT:  # %bb.1: # %entry
 ; RV64ZBA-NEXT:    mv a0, a1
-; RV64ZBA-NEXT:  .LBB36_2: # %entry
+; RV64ZBA-NEXT:  .LBB37_2: # %entry
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
@@ -1934,12 +1985,12 @@ define i1 @usubo.not.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:    sltu a4, a0, a2
 ; RV32-NEXT:    sub a3, a1, a3
 ; RV32-NEXT:    sub a3, a3, a4
-; RV32-NEXT:    beq a3, a1, .LBB37_2
+; RV32-NEXT:    beq a3, a1, .LBB38_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    sltu a0, a1, a3
 ; RV32-NEXT:    xori a0, a0, 1
 ; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB37_2:
+; RV32-NEXT:  .LBB38_2:
 ; RV32-NEXT:    sub a1, a0, a2
 ; RV32-NEXT:    sltu a0, a0, a1
 ; RV32-NEXT:    xori a0, a0, 1
@@ -1957,12 +2008,12 @@ define i1 @usubo.not.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:    sltu a4, a0, a2
 ; RV32ZBA-NEXT:    sub a3, a1, a3
 ; RV32ZBA-NEXT:    sub a3, a3, a4
-; RV32ZBA-NEXT:    beq a3, a1, .LBB37_2
+; RV32ZBA-NEXT:    beq a3, a1, .LBB38_2
 ; RV32ZBA-NEXT:  # %bb.1: # %entry
 ; RV32ZBA-NEXT:    sltu a0, a1, a3
 ; RV32ZBA-NEXT:    xori a0, a0, 1
 ; RV32ZBA-NEXT:    ret
-; RV32ZBA-NEXT:  .LBB37_2:
+; RV32ZBA-NEXT:  .LBB38_2:
 ; RV32ZBA-NEXT:    sub a1, a0, a2
 ; RV32ZBA-NEXT:    sltu a0, a0, a1
 ; RV32ZBA-NEXT:    xori a0, a0, 1
@@ -1987,10 +2038,10 @@ define i32 @smulo.select.i32(i32 %v1, i32 %v2) {
 ; RV32-NEXT:    mulh a2, a0, a1
 ; RV32-NEXT:    mul a3, a0, a1
 ; RV32-NEXT:    srai a3, a3, 31
-; RV32-NEXT:    bne a2, a3, .LBB38_2
+; RV32-NEXT:    bne a2, a3, .LBB39_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    mv a0, a1
-; RV32-NEXT:  .LBB38_2: # %entry
+; RV32-NEXT:  .LBB39_2: # %entry
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: smulo.select.i32:
@@ -1999,10 +2050,10 @@ define i32 @smulo.select.i32(i32 %v1, i32 %v2) {
 ; RV64-NEXT:    sext.w a3, a0
 ; RV64-NEXT:    mul a4, a3, a2
 ; RV64-NEXT:    mulw a2, a3, a2
-; RV64-NEXT:    bne a2, a4, .LBB38_2
+; RV64-NEXT:    bne a2, a4, .LBB39_2
 ; RV64-NEXT:  # %bb.1: # %entry
 ; RV64-NEXT:    mv a0, a1
-; RV64-NEXT:  .LBB38_2: # %entry
+; RV64-NEXT:  .LBB39_2: # %entry
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: smulo.select.i32:
@@ -2010,10 +2061,10 @@ define i32 @smulo.select.i32(i32 %v1, i32 %v2) {
 ; RV32ZBA-NEXT:    mulh a2, a0, a1
 ; RV32ZBA-NEXT:    mul a3, a0, a1
 ; RV32ZBA-NEXT:    srai a3, a3, 31
-; RV32ZBA-NEXT:    bne a2, a3, .LBB38_2
+; RV32ZBA-NEXT:    bne a2, a3, .LBB39_2
 ; RV32ZBA-NEXT:  # %bb.1: # %entry
 ; RV32ZBA-NEXT:    mv a0, a1
-; RV32ZBA-NEXT:  .LBB38_2: # %entry
+; RV32ZBA-NEXT:  .LBB39_2: # %entry
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: smulo.select.i32:
@@ -2022,10 +2073,10 @@ define i32 @smulo.select.i32(i32 %v1, i32 %v2) {
 ; RV64ZBA-NEXT:    sext.w a3, a0
 ; RV64ZBA-NEXT:    mul a4, a3, a2
 ; RV64ZBA-NEXT:    mulw a2, a3, a2
-; RV64ZBA-NEXT:    bne a2, a4, .LBB38_2
+; RV64ZBA-NEXT:    bne a2, a4, .LBB39_2
 ; RV64ZBA-NEXT:  # %bb.1: # %entry
 ; RV64ZBA-NEXT:    mv a0, a1
-; RV64ZBA-NEXT:  .LBB38_2: # %entry
+; RV64ZBA-NEXT:  .LBB39_2: # %entry
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
@@ -2102,11 +2153,11 @@ define i64 @smulo.select.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:    addi a4, sp, 8
 ; RV32-NEXT:    call __mulodi4@plt
 ; RV32-NEXT:    lw a0, 8(sp)
-; RV32-NEXT:    bnez a0, .LBB40_2
+; RV32-NEXT:    bnez a0, .LBB41_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    mv s1, s3
 ; RV32-NEXT:    mv s0, s2
-; RV32-NEXT:  .LBB40_2: # %entry
+; RV32-NEXT:  .LBB41_2: # %entry
 ; RV32-NEXT:    mv a0, s1
 ; RV32-NEXT:    mv a1, s0
 ; RV32-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
@@ -2122,10 +2173,10 @@ define i64 @smulo.select.i64(i64 %v1, i64 %v2) {
 ; RV64-NEXT:    mulh a2, a0, a1
 ; RV64-NEXT:    mul a3, a0, a1
 ; RV64-NEXT:    srai a3, a3, 63
-; RV64-NEXT:    bne a2, a3, .LBB40_2
+; RV64-NEXT:    bne a2, a3, .LBB41_2
 ; RV64-NEXT:  # %bb.1: # %entry
 ; RV64-NEXT:    mv a0, a1
-; RV64-NEXT:  .LBB40_2: # %entry
+; RV64-NEXT:  .LBB41_2: # %entry
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: smulo.select.i64:
@@ -2150,11 +2201,11 @@ define i64 @smulo.select.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:    addi a4, sp, 8
 ; RV32ZBA-NEXT:    call __mulodi4@plt
 ; RV32ZBA-NEXT:    lw a0, 8(sp)
-; RV32ZBA-NEXT:    bnez a0, .LBB40_2
+; RV32ZBA-NEXT:    bnez a0, .LBB41_2
 ; RV32ZBA-NEXT:  # %bb.1: # %entry
 ; RV32ZBA-NEXT:    mv s1, s3
 ; RV32ZBA-NEXT:    mv s0, s2
-; RV32ZBA-NEXT:  .LBB40_2: # %entry
+; RV32ZBA-NEXT:  .LBB41_2: # %entry
 ; RV32ZBA-NEXT:    mv a0, s1
 ; RV32ZBA-NEXT:    mv a1, s0
 ; RV32ZBA-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
@@ -2170,10 +2221,10 @@ define i64 @smulo.select.i64(i64 %v1, i64 %v2) {
 ; RV64ZBA-NEXT:    mulh a2, a0, a1
 ; RV64ZBA-NEXT:    mul a3, a0, a1
 ; RV64ZBA-NEXT:    srai a3, a3, 63
-; RV64ZBA-NEXT:    bne a2, a3, .LBB40_2
+; RV64ZBA-NEXT:    bne a2, a3, .LBB41_2
 ; RV64ZBA-NEXT:  # %bb.1: # %entry
 ; RV64ZBA-NEXT:    mv a0, a1
-; RV64ZBA-NEXT:  .LBB40_2: # %entry
+; RV64ZBA-NEXT:  .LBB41_2: # %entry
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
@@ -2241,10 +2292,10 @@ define i32 @umulo.select.i32(i32 %v1, i32 %v2) {
 ; RV32-LABEL: umulo.select.i32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    mulhu a2, a0, a1
-; RV32-NEXT:    bnez a2, .LBB42_2
+; RV32-NEXT:    bnez a2, .LBB43_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    mv a0, a1
-; RV32-NEXT:  .LBB42_2: # %entry
+; RV32-NEXT:  .LBB43_2: # %entry
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: umulo.select.i32:
@@ -2253,19 +2304,19 @@ define i32 @umulo.select.i32(i32 %v1, i32 %v2) {
 ; RV64-NEXT:    slli a3, a0, 32
 ; RV64-NEXT:    mulhu a2, a3, a2
 ; RV64-NEXT:    srli a2, a2, 32
-; RV64-NEXT:    bnez a2, .LBB42_2
+; RV64-NEXT:    bnez a2, .LBB43_2
 ; RV64-NEXT:  # %bb.1: # %entry
 ; RV64-NEXT:    mv a0, a1
-; RV64-NEXT:  .LBB42_2: # %entry
+; RV64-NEXT:  .LBB43_2: # %entry
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: umulo.select.i32:
 ; RV32ZBA:       # %bb.0: # %entry
 ; RV32ZBA-NEXT:    mulhu a2, a0, a1
-; RV32ZBA-NEXT:    bnez a2, .LBB42_2
+; RV32ZBA-NEXT:    bnez a2, .LBB43_2
 ; RV32ZBA-NEXT:  # %bb.1: # %entry
 ; RV32ZBA-NEXT:    mv a0, a1
-; RV32ZBA-NEXT:  .LBB42_2: # %entry
+; RV32ZBA-NEXT:  .LBB43_2: # %entry
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: umulo.select.i32:
@@ -2274,10 +2325,10 @@ define i32 @umulo.select.i32(i32 %v1, i32 %v2) {
 ; RV64ZBA-NEXT:    zext.w a3, a0
 ; RV64ZBA-NEXT:    mul a2, a3, a2
 ; RV64ZBA-NEXT:    srli a2, a2, 32
-; RV64ZBA-NEXT:    bnez a2, .LBB42_2
+; RV64ZBA-NEXT:    bnez a2, .LBB43_2
 ; RV64ZBA-NEXT:  # %bb.1: # %entry
 ; RV64ZBA-NEXT:    mv a0, a1
-; RV64ZBA-NEXT:  .LBB42_2: # %entry
+; RV64ZBA-NEXT:  .LBB43_2: # %entry
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
@@ -2342,20 +2393,20 @@ define i64 @umulo.select.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:    snez a5, a5
 ; RV32-NEXT:    or a4, a4, a5
 ; RV32-NEXT:    or a4, a4, a6
-; RV32-NEXT:    bnez a4, .LBB44_2
+; RV32-NEXT:    bnez a4, .LBB45_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    mv a0, a2
 ; RV32-NEXT:    mv a1, a3
-; RV32-NEXT:  .LBB44_2: # %entry
+; RV32-NEXT:  .LBB45_2: # %entry
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: umulo.select.i64:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    mulhu a2, a0, a1
-; RV64-NEXT:    bnez a2, .LBB44_2
+; RV64-NEXT:    bnez a2, .LBB45_2
 ; RV64-NEXT:  # %bb.1: # %entry
 ; RV64-NEXT:    mv a0, a1
-; RV64-NEXT:  .LBB44_2: # %entry
+; RV64-NEXT:  .LBB45_2: # %entry
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: umulo.select.i64:
@@ -2376,20 +2427,20 @@ define i64 @umulo.select.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:    snez a5, a5
 ; RV32ZBA-NEXT:    or a4, a4, a5
 ; RV32ZBA-NEXT:    or a4, a4, a6
-; RV32ZBA-NEXT:    bnez a4, .LBB44_2
+; RV32ZBA-NEXT:    bnez a4, .LBB45_2
 ; RV32ZBA-NEXT:  # %bb.1: # %entry
 ; RV32ZBA-NEXT:    mv a0, a2
 ; RV32ZBA-NEXT:    mv a1, a3
-; RV32ZBA-NEXT:  .LBB44_2: # %entry
+; RV32ZBA-NEXT:  .LBB45_2: # %entry
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: umulo.select.i64:
 ; RV64ZBA:       # %bb.0: # %entry
 ; RV64ZBA-NEXT:    mulhu a2, a0, a1
-; RV64ZBA-NEXT:    bnez a2, .LBB44_2
+; RV64ZBA-NEXT:    bnez a2, .LBB45_2
 ; RV64ZBA-NEXT:  # %bb.1: # %entry
 ; RV64ZBA-NEXT:    mv a0, a1
-; RV64ZBA-NEXT:  .LBB44_2: # %entry
+; RV64ZBA-NEXT:  .LBB45_2: # %entry
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
@@ -2469,11 +2520,11 @@ define zeroext i1 @saddo.br.i32(i32 %v1, i32 %v2) {
 ; RV32-NEXT:    add a2, a0, a1
 ; RV32-NEXT:    slt a0, a2, a0
 ; RV32-NEXT:    slti a1, a1, 0
-; RV32-NEXT:    beq a1, a0, .LBB46_2
+; RV32-NEXT:    beq a1, a0, .LBB47_2
 ; RV32-NEXT:  # %bb.1: # %overflow
 ; RV32-NEXT:    mv a0, zero
 ; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB46_2: # %continue
+; RV32-NEXT:  .LBB47_2: # %continue
 ; RV32-NEXT:    addi a0, zero, 1
 ; RV32-NEXT:    ret
 ;
@@ -2483,11 +2534,11 @@ define zeroext i1 @saddo.br.i32(i32 %v1, i32 %v2) {
 ; RV64-NEXT:    sext.w a0, a0
 ; RV64-NEXT:    add a2, a0, a1
 ; RV64-NEXT:    addw a0, a0, a1
-; RV64-NEXT:    beq a0, a2, .LBB46_2
+; RV64-NEXT:    beq a0, a2, .LBB47_2
 ; RV64-NEXT:  # %bb.1: # %overflow
 ; RV64-NEXT:    mv a0, zero
 ; RV64-NEXT:    ret
-; RV64-NEXT:  .LBB46_2: # %continue
+; RV64-NEXT:  .LBB47_2: # %continue
 ; RV64-NEXT:    addi a0, zero, 1
 ; RV64-NEXT:    ret
 ;
@@ -2496,11 +2547,11 @@ define zeroext i1 @saddo.br.i32(i32 %v1, i32 %v2) {
 ; RV32ZBA-NEXT:    add a2, a0, a1
 ; RV32ZBA-NEXT:    slt a0, a2, a0
 ; RV32ZBA-NEXT:    slti a1, a1, 0
-; RV32ZBA-NEXT:    beq a1, a0, .LBB46_2
+; RV32ZBA-NEXT:    beq a1, a0, .LBB47_2
 ; RV32ZBA-NEXT:  # %bb.1: # %overflow
 ; RV32ZBA-NEXT:    mv a0, zero
 ; RV32ZBA-NEXT:    ret
-; RV32ZBA-NEXT:  .LBB46_2: # %continue
+; RV32ZBA-NEXT:  .LBB47_2: # %continue
 ; RV32ZBA-NEXT:    addi a0, zero, 1
 ; RV32ZBA-NEXT:    ret
 ;
@@ -2510,11 +2561,11 @@ define zeroext i1 @saddo.br.i32(i32 %v1, i32 %v2) {
 ; RV64ZBA-NEXT:    sext.w a0, a0
 ; RV64ZBA-NEXT:    add a2, a0, a1
 ; RV64ZBA-NEXT:    addw a0, a0, a1
-; RV64ZBA-NEXT:    beq a0, a2, .LBB46_2
+; RV64ZBA-NEXT:    beq a0, a2, .LBB47_2
 ; RV64ZBA-NEXT:  # %bb.1: # %overflow
 ; RV64ZBA-NEXT:    mv a0, zero
 ; RV64ZBA-NEXT:    ret
-; RV64ZBA-NEXT:  .LBB46_2: # %continue
+; RV64ZBA-NEXT:  .LBB47_2: # %continue
 ; RV64ZBA-NEXT:    addi a0, zero, 1
 ; RV64ZBA-NEXT:    ret
 entry:
@@ -2541,11 +2592,11 @@ define zeroext i1 @saddo.br.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:    xor a1, a1, a3
 ; RV32-NEXT:    not a1, a1
 ; RV32-NEXT:    and a0, a1, a0
-; RV32-NEXT:    bgez a0, .LBB47_2
+; RV32-NEXT:    bgez a0, .LBB48_2
 ; RV32-NEXT:  # %bb.1: # %overflow
 ; RV32-NEXT:    mv a0, zero
 ; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB47_2: # %continue
+; RV32-NEXT:  .LBB48_2: # %continue
 ; RV32-NEXT:    addi a0, zero, 1
 ; RV32-NEXT:    ret
 ;
@@ -2554,11 +2605,11 @@ define zeroext i1 @saddo.br.i64(i64 %v1, i64 %v2) {
 ; RV64-NEXT:    add a2, a0, a1
 ; RV64-NEXT:    slt a0, a2, a0
 ; RV64-NEXT:    slti a1, a1, 0
-; RV64-NEXT:    beq a1, a0, .LBB47_2
+; RV64-NEXT:    beq a1, a0, .LBB48_2
 ; RV64-NEXT:  # %bb.1: # %overflow
 ; RV64-NEXT:    mv a0, zero
 ; RV64-NEXT:    ret
-; RV64-NEXT:  .LBB47_2: # %continue
+; RV64-NEXT:  .LBB48_2: # %continue
 ; RV64-NEXT:    addi a0, zero, 1
 ; RV64-NEXT:    ret
 ;
@@ -2572,11 +2623,11 @@ define zeroext i1 @saddo.br.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:    xor a1, a1, a3
 ; RV32ZBA-NEXT:    not a1, a1
 ; RV32ZBA-NEXT:    and a0, a1, a0
-; RV32ZBA-NEXT:    bgez a0, .LBB47_2
+; RV32ZBA-NEXT:    bgez a0, .LBB48_2
 ; RV32ZBA-NEXT:  # %bb.1: # %overflow
 ; RV32ZBA-NEXT:    mv a0, zero
 ; RV32ZBA-NEXT:    ret
-; RV32ZBA-NEXT:  .LBB47_2: # %continue
+; RV32ZBA-NEXT:  .LBB48_2: # %continue
 ; RV32ZBA-NEXT:    addi a0, zero, 1
 ; RV32ZBA-NEXT:    ret
 ;
@@ -2585,11 +2636,11 @@ define zeroext i1 @saddo.br.i64(i64 %v1, i64 %v2) {
 ; RV64ZBA-NEXT:    add a2, a0, a1
 ; RV64ZBA-NEXT:    slt a0, a2, a0
 ; RV64ZBA-NEXT:    slti a1, a1, 0
-; RV64ZBA-NEXT:    beq a1, a0, .LBB47_2
+; RV64ZBA-NEXT:    beq a1, a0, .LBB48_2
 ; RV64ZBA-NEXT:  # %bb.1: # %overflow
 ; RV64ZBA-NEXT:    mv a0, zero
 ; RV64ZBA-NEXT:    ret
-; RV64ZBA-NEXT:  .LBB47_2: # %continue
+; RV64ZBA-NEXT:  .LBB48_2: # %continue
 ; RV64ZBA-NEXT:    addi a0, zero, 1
 ; RV64ZBA-NEXT:    ret
 entry:
@@ -2609,11 +2660,11 @@ define zeroext i1 @uaddo.br.i32(i32 %v1, i32 %v2) {
 ; RV32-LABEL: uaddo.br.i32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    add a1, a0, a1
-; RV32-NEXT:    bgeu a1, a0, .LBB48_2
+; RV32-NEXT:    bgeu a1, a0, .LBB49_2
 ; RV32-NEXT:  # %bb.1: # %overflow
 ; RV32-NEXT:    mv a0, zero
 ; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB48_2: # %continue
+; RV32-NEXT:  .LBB49_2: # %continue
 ; RV32-NEXT:    addi a0, zero, 1
 ; RV32-NEXT:    ret
 ;
@@ -2621,22 +2672,22 @@ define zeroext i1 @uaddo.br.i32(i32 %v1, i32 %v2) {
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    addw a1, a0, a1
 ; RV64-NEXT:    sext.w a0, a0
-; RV64-NEXT:    bgeu a1, a0, .LBB48_2
+; RV64-NEXT:    bgeu a1, a0, .LBB49_2
 ; RV64-NEXT:  # %bb.1: # %overflow
 ; RV64-NEXT:    mv a0, zero
 ; RV64-NEXT:    ret
-; RV64-NEXT:  .LBB48_2: # %continue
+; RV64-NEXT:  .LBB49_2: # %continue
 ; RV64-NEXT:    addi a0, zero, 1
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: uaddo.br.i32:
 ; RV32ZBA:       # %bb.0: # %entry
 ; RV32ZBA-NEXT:    add a1, a0, a1
-; RV32ZBA-NEXT:    bgeu a1, a0, .LBB48_2
+; RV32ZBA-NEXT:    bgeu a1, a0, .LBB49_2
 ; RV32ZBA-NEXT:  # %bb.1: # %overflow
 ; RV32ZBA-NEXT:    mv a0, zero
 ; RV32ZBA-NEXT:    ret
-; RV32ZBA-NEXT:  .LBB48_2: # %continue
+; RV32ZBA-NEXT:  .LBB49_2: # %continue
 ; RV32ZBA-NEXT:    addi a0, zero, 1
 ; RV32ZBA-NEXT:    ret
 ;
@@ -2644,11 +2695,11 @@ define zeroext i1 @uaddo.br.i32(i32 %v1, i32 %v2) {
 ; RV64ZBA:       # %bb.0: # %entry
 ; RV64ZBA-NEXT:    addw a1, a0, a1
 ; RV64ZBA-NEXT:    sext.w a0, a0
-; RV64ZBA-NEXT:    bgeu a1, a0, .LBB48_2
+; RV64ZBA-NEXT:    bgeu a1, a0, .LBB49_2
 ; RV64ZBA-NEXT:  # %bb.1: # %overflow
 ; RV64ZBA-NEXT:    mv a0, zero
 ; RV64ZBA-NEXT:    ret
-; RV64ZBA-NEXT:  .LBB48_2: # %continue
+; RV64ZBA-NEXT:  .LBB49_2: # %continue
 ; RV64ZBA-NEXT:    addi a0, zero, 1
 ; RV64ZBA-NEXT:    ret
 entry:
@@ -2671,26 +2722,26 @@ define zeroext i1 @uaddo.br.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:    add a2, a0, a2
 ; RV32-NEXT:    sltu a0, a2, a0
 ; RV32-NEXT:    add a2, a3, a0
-; RV32-NEXT:    beq a2, a1, .LBB49_2
+; RV32-NEXT:    beq a2, a1, .LBB50_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    sltu a0, a2, a1
-; RV32-NEXT:  .LBB49_2: # %entry
-; RV32-NEXT:    beqz a0, .LBB49_4
+; RV32-NEXT:  .LBB50_2: # %entry
+; RV32-NEXT:    beqz a0, .LBB50_4
 ; RV32-NEXT:  # %bb.3: # %overflow
 ; RV32-NEXT:    mv a0, zero
 ; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB49_4: # %continue
+; RV32-NEXT:  .LBB50_4: # %continue
 ; RV32-NEXT:    addi a0, zero, 1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: uaddo.br.i64:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    add a1, a0, a1
-; RV64-NEXT:    bgeu a1, a0, .LBB49_2
+; RV64-NEXT:    bgeu a1, a0, .LBB50_2
 ; RV64-NEXT:  # %bb.1: # %overflow
 ; RV64-NEXT:    mv a0, zero
 ; RV64-NEXT:    ret
-; RV64-NEXT:  .LBB49_2: # %continue
+; RV64-NEXT:  .LBB50_2: # %continue
 ; RV64-NEXT:    addi a0, zero, 1
 ; RV64-NEXT:    ret
 ;
@@ -2700,26 +2751,26 @@ define zeroext i1 @uaddo.br.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:    add a2, a0, a2
 ; RV32ZBA-NEXT:    sltu a0, a2, a0
 ; RV32ZBA-NEXT:    add a2, a3, a0
-; RV32ZBA-NEXT:    beq a2, a1, .LBB49_2
+; RV32ZBA-NEXT:    beq a2, a1, .LBB50_2
 ; RV32ZBA-NEXT:  # %bb.1: # %entry
 ; RV32ZBA-NEXT:    sltu a0, a2, a1
-; RV32ZBA-NEXT:  .LBB49_2: # %entry
-; RV32ZBA-NEXT:    beqz a0, .LBB49_4
+; RV32ZBA-NEXT:  .LBB50_2: # %entry
+; RV32ZBA-NEXT:    beqz a0, .LBB50_4
 ; RV32ZBA-NEXT:  # %bb.3: # %overflow
 ; RV32ZBA-NEXT:    mv a0, zero
 ; RV32ZBA-NEXT:    ret
-; RV32ZBA-NEXT:  .LBB49_4: # %continue
+; RV32ZBA-NEXT:  .LBB50_4: # %continue
 ; RV32ZBA-NEXT:    addi a0, zero, 1
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: uaddo.br.i64:
 ; RV64ZBA:       # %bb.0: # %entry
 ; RV64ZBA-NEXT:    add a1, a0, a1
-; RV64ZBA-NEXT:    bgeu a1, a0, .LBB49_2
+; RV64ZBA-NEXT:    bgeu a1, a0, .LBB50_2
 ; RV64ZBA-NEXT:  # %bb.1: # %overflow
 ; RV64ZBA-NEXT:    mv a0, zero
 ; RV64ZBA-NEXT:    ret
-; RV64ZBA-NEXT:  .LBB49_2: # %continue
+; RV64ZBA-NEXT:  .LBB50_2: # %continue
 ; RV64ZBA-NEXT:    addi a0, zero, 1
 ; RV64ZBA-NEXT:    ret
 entry:
@@ -2741,11 +2792,11 @@ define zeroext i1 @ssubo.br.i32(i32 %v1, i32 %v2) {
 ; RV32-NEXT:    sgtz a2, a1
 ; RV32-NEXT:    sub a1, a0, a1
 ; RV32-NEXT:    slt a0, a1, a0
-; RV32-NEXT:    beq a2, a0, .LBB50_2
+; RV32-NEXT:    beq a2, a0, .LBB51_2
 ; RV32-NEXT:  # %bb.1: # %overflow
 ; RV32-NEXT:    mv a0, zero
 ; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB50_2: # %continue
+; RV32-NEXT:  .LBB51_2: # %continue
 ; RV32-NEXT:    addi a0, zero, 1
 ; RV32-NEXT:    ret
 ;
@@ -2755,11 +2806,11 @@ define zeroext i1 @ssubo.br.i32(i32 %v1, i32 %v2) {
 ; RV64-NEXT:    sext.w a0, a0
 ; RV64-NEXT:    sub a2, a0, a1
 ; RV64-NEXT:    subw a0, a0, a1
-; RV64-NEXT:    beq a0, a2, .LBB50_2
+; RV64-NEXT:    beq a0, a2, .LBB51_2
 ; RV64-NEXT:  # %bb.1: # %overflow
 ; RV64-NEXT:    mv a0, zero
 ; RV64-NEXT:    ret
-; RV64-NEXT:  .LBB50_2: # %continue
+; RV64-NEXT:  .LBB51_2: # %continue
 ; RV64-NEXT:    addi a0, zero, 1
 ; RV64-NEXT:    ret
 ;
@@ -2768,11 +2819,11 @@ define zeroext i1 @ssubo.br.i32(i32 %v1, i32 %v2) {
 ; RV32ZBA-NEXT:    sgtz a2, a1
 ; RV32ZBA-NEXT:    sub a1, a0, a1
 ; RV32ZBA-NEXT:    slt a0, a1, a0
-; RV32ZBA-NEXT:    beq a2, a0, .LBB50_2
+; RV32ZBA-NEXT:    beq a2, a0, .LBB51_2
 ; RV32ZBA-NEXT:  # %bb.1: # %overflow
 ; RV32ZBA-NEXT:    mv a0, zero
 ; RV32ZBA-NEXT:    ret
-; RV32ZBA-NEXT:  .LBB50_2: # %continue
+; RV32ZBA-NEXT:  .LBB51_2: # %continue
 ; RV32ZBA-NEXT:    addi a0, zero, 1
 ; RV32ZBA-NEXT:    ret
 ;
@@ -2782,11 +2833,11 @@ define zeroext i1 @ssubo.br.i32(i32 %v1, i32 %v2) {
 ; RV64ZBA-NEXT:    sext.w a0, a0
 ; RV64ZBA-NEXT:    sub a2, a0, a1
 ; RV64ZBA-NEXT:    subw a0, a0, a1
-; RV64ZBA-NEXT:    beq a0, a2, .LBB50_2
+; RV64ZBA-NEXT:    beq a0, a2, .LBB51_2
 ; RV64ZBA-NEXT:  # %bb.1: # %overflow
 ; RV64ZBA-NEXT:    mv a0, zero
 ; RV64ZBA-NEXT:    ret
-; RV64ZBA-NEXT:  .LBB50_2: # %continue
+; RV64ZBA-NEXT:  .LBB51_2: # %continue
 ; RV64ZBA-NEXT:    addi a0, zero, 1
 ; RV64ZBA-NEXT:    ret
 entry:
@@ -2811,11 +2862,11 @@ define zeroext i1 @ssubo.br.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:    xor a0, a1, a0
 ; RV32-NEXT:    xor a1, a1, a3
 ; RV32-NEXT:    and a0, a1, a0
-; RV32-NEXT:    bgez a0, .LBB51_2
+; RV32-NEXT:    bgez a0, .LBB52_2
 ; RV32-NEXT:  # %bb.1: # %overflow
 ; RV32-NEXT:    mv a0, zero
 ; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB51_2: # %continue
+; RV32-NEXT:  .LBB52_2: # %continue
 ; RV32-NEXT:    addi a0, zero, 1
 ; RV32-NEXT:    ret
 ;
@@ -2824,11 +2875,11 @@ define zeroext i1 @ssubo.br.i64(i64 %v1, i64 %v2) {
 ; RV64-NEXT:    sgtz a2, a1
 ; RV64-NEXT:    sub a1, a0, a1
 ; RV64-NEXT:    slt a0, a1, a0
-; RV64-NEXT:    beq a2, a0, .LBB51_2
+; RV64-NEXT:    beq a2, a0, .LBB52_2
 ; RV64-NEXT:  # %bb.1: # %overflow
 ; RV64-NEXT:    mv a0, zero
 ; RV64-NEXT:    ret
-; RV64-NEXT:  .LBB51_2: # %continue
+; RV64-NEXT:  .LBB52_2: # %continue
 ; RV64-NEXT:    addi a0, zero, 1
 ; RV64-NEXT:    ret
 ;
@@ -2840,11 +2891,11 @@ define zeroext i1 @ssubo.br.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:    xor a0, a1, a0
 ; RV32ZBA-NEXT:    xor a1, a1, a3
 ; RV32ZBA-NEXT:    and a0, a1, a0
-; RV32ZBA-NEXT:    bgez a0, .LBB51_2
+; RV32ZBA-NEXT:    bgez a0, .LBB52_2
 ; RV32ZBA-NEXT:  # %bb.1: # %overflow
 ; RV32ZBA-NEXT:    mv a0, zero
 ; RV32ZBA-NEXT:    ret
-; RV32ZBA-NEXT:  .LBB51_2: # %continue
+; RV32ZBA-NEXT:  .LBB52_2: # %continue
 ; RV32ZBA-NEXT:    addi a0, zero, 1
 ; RV32ZBA-NEXT:    ret
 ;
@@ -2853,11 +2904,11 @@ define zeroext i1 @ssubo.br.i64(i64 %v1, i64 %v2) {
 ; RV64ZBA-NEXT:    sgtz a2, a1
 ; RV64ZBA-NEXT:    sub a1, a0, a1
 ; RV64ZBA-NEXT:    slt a0, a1, a0
-; RV64ZBA-NEXT:    beq a2, a0, .LBB51_2
+; RV64ZBA-NEXT:    beq a2, a0, .LBB52_2
 ; RV64ZBA-NEXT:  # %bb.1: # %overflow
 ; RV64ZBA-NEXT:    mv a0, zero
 ; RV64ZBA-NEXT:    ret
-; RV64ZBA-NEXT:  .LBB51_2: # %continue
+; RV64ZBA-NEXT:  .LBB52_2: # %continue
 ; RV64ZBA-NEXT:    addi a0, zero, 1
 ; RV64ZBA-NEXT:    ret
 entry:
@@ -2877,11 +2928,11 @@ define zeroext i1 @usubo.br.i32(i32 %v1, i32 %v2) {
 ; RV32-LABEL: usubo.br.i32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    sub a1, a0, a1
-; RV32-NEXT:    bgeu a0, a1, .LBB52_2
+; RV32-NEXT:    bgeu a0, a1, .LBB53_2
 ; RV32-NEXT:  # %bb.1: # %overflow
 ; RV32-NEXT:    mv a0, zero
 ; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB52_2: # %continue
+; RV32-NEXT:  .LBB53_2: # %continue
 ; RV32-NEXT:    addi a0, zero, 1
 ; RV32-NEXT:    ret
 ;
@@ -2889,22 +2940,22 @@ define zeroext i1 @usubo.br.i32(i32 %v1, i32 %v2) {
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    subw a1, a0, a1
 ; RV64-NEXT:    sext.w a0, a0
-; RV64-NEXT:    bgeu a0, a1, .LBB52_2
+; RV64-NEXT:    bgeu a0, a1, .LBB53_2
 ; RV64-NEXT:  # %bb.1: # %overflow
 ; RV64-NEXT:    mv a0, zero
 ; RV64-NEXT:    ret
-; RV64-NEXT:  .LBB52_2: # %continue
+; RV64-NEXT:  .LBB53_2: # %continue
 ; RV64-NEXT:    addi a0, zero, 1
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: usubo.br.i32:
 ; RV32ZBA:       # %bb.0: # %entry
 ; RV32ZBA-NEXT:    sub a1, a0, a1
-; RV32ZBA-NEXT:    bgeu a0, a1, .LBB52_2
+; RV32ZBA-NEXT:    bgeu a0, a1, .LBB53_2
 ; RV32ZBA-NEXT:  # %bb.1: # %overflow
 ; RV32ZBA-NEXT:    mv a0, zero
 ; RV32ZBA-NEXT:    ret
-; RV32ZBA-NEXT:  .LBB52_2: # %continue
+; RV32ZBA-NEXT:  .LBB53_2: # %continue
 ; RV32ZBA-NEXT:    addi a0, zero, 1
 ; RV32ZBA-NEXT:    ret
 ;
@@ -2912,11 +2963,11 @@ define zeroext i1 @usubo.br.i32(i32 %v1, i32 %v2) {
 ; RV64ZBA:       # %bb.0: # %entry
 ; RV64ZBA-NEXT:    subw a1, a0, a1
 ; RV64ZBA-NEXT:    sext.w a0, a0
-; RV64ZBA-NEXT:    bgeu a0, a1, .LBB52_2
+; RV64ZBA-NEXT:    bgeu a0, a1, .LBB53_2
 ; RV64ZBA-NEXT:  # %bb.1: # %overflow
 ; RV64ZBA-NEXT:    mv a0, zero
 ; RV64ZBA-NEXT:    ret
-; RV64ZBA-NEXT:  .LBB52_2: # %continue
+; RV64ZBA-NEXT:  .LBB53_2: # %continue
 ; RV64ZBA-NEXT:    addi a0, zero, 1
 ; RV64ZBA-NEXT:    ret
 entry:
@@ -2938,29 +2989,29 @@ define zeroext i1 @usubo.br.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:    sltu a4, a0, a2
 ; RV32-NEXT:    sub a3, a1, a3
 ; RV32-NEXT:    sub a3, a3, a4
-; RV32-NEXT:    beq a3, a1, .LBB53_3
+; RV32-NEXT:    beq a3, a1, .LBB54_3
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    sltu a0, a1, a3
-; RV32-NEXT:    bnez a0, .LBB53_4
-; RV32-NEXT:  .LBB53_2: # %continue
+; RV32-NEXT:    bnez a0, .LBB54_4
+; RV32-NEXT:  .LBB54_2: # %continue
 ; RV32-NEXT:    addi a0, zero, 1
 ; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB53_3:
+; RV32-NEXT:  .LBB54_3:
 ; RV32-NEXT:    sub a1, a0, a2
 ; RV32-NEXT:    sltu a0, a0, a1
-; RV32-NEXT:    beqz a0, .LBB53_2
-; RV32-NEXT:  .LBB53_4: # %overflow
+; RV32-NEXT:    beqz a0, .LBB54_2
+; RV32-NEXT:  .LBB54_4: # %overflow
 ; RV32-NEXT:    mv a0, zero
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: usubo.br.i64:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    sub a1, a0, a1
-; RV64-NEXT:    bgeu a0, a1, .LBB53_2
+; RV64-NEXT:    bgeu a0, a1, .LBB54_2
 ; RV64-NEXT:  # %bb.1: # %overflow
 ; RV64-NEXT:    mv a0, zero
 ; RV64-NEXT:    ret
-; RV64-NEXT:  .LBB53_2: # %continue
+; RV64-NEXT:  .LBB54_2: # %continue
 ; RV64-NEXT:    addi a0, zero, 1
 ; RV64-NEXT:    ret
 ;
@@ -2969,29 +3020,29 @@ define zeroext i1 @usubo.br.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:    sltu a4, a0, a2
 ; RV32ZBA-NEXT:    sub a3, a1, a3
 ; RV32ZBA-NEXT:    sub a3, a3, a4
-; RV32ZBA-NEXT:    beq a3, a1, .LBB53_3
+; RV32ZBA-NEXT:    beq a3, a1, .LBB54_3
 ; RV32ZBA-NEXT:  # %bb.1: # %entry
 ; RV32ZBA-NEXT:    sltu a0, a1, a3
-; RV32ZBA-NEXT:    bnez a0, .LBB53_4
-; RV32ZBA-NEXT:  .LBB53_2: # %continue
+; RV32ZBA-NEXT:    bnez a0, .LBB54_4
+; RV32ZBA-NEXT:  .LBB54_2: # %continue
 ; RV32ZBA-NEXT:    addi a0, zero, 1
 ; RV32ZBA-NEXT:    ret
-; RV32ZBA-NEXT:  .LBB53_3:
+; RV32ZBA-NEXT:  .LBB54_3:
 ; RV32ZBA-NEXT:    sub a1, a0, a2
 ; RV32ZBA-NEXT:    sltu a0, a0, a1
-; RV32ZBA-NEXT:    beqz a0, .LBB53_2
-; RV32ZBA-NEXT:  .LBB53_4: # %overflow
+; RV32ZBA-NEXT:    beqz a0, .LBB54_2
+; RV32ZBA-NEXT:  .LBB54_4: # %overflow
 ; RV32ZBA-NEXT:    mv a0, zero
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: usubo.br.i64:
 ; RV64ZBA:       # %bb.0: # %entry
 ; RV64ZBA-NEXT:    sub a1, a0, a1
-; RV64ZBA-NEXT:    bgeu a0, a1, .LBB53_2
+; RV64ZBA-NEXT:    bgeu a0, a1, .LBB54_2
 ; RV64ZBA-NEXT:  # %bb.1: # %overflow
 ; RV64ZBA-NEXT:    mv a0, zero
 ; RV64ZBA-NEXT:    ret
-; RV64ZBA-NEXT:  .LBB53_2: # %continue
+; RV64ZBA-NEXT:  .LBB54_2: # %continue
 ; RV64ZBA-NEXT:    addi a0, zero, 1
 ; RV64ZBA-NEXT:    ret
 entry:
@@ -3013,11 +3064,11 @@ define zeroext i1 @smulo.br.i32(i32 %v1, i32 %v2) {
 ; RV32-NEXT:    mulh a2, a0, a1
 ; RV32-NEXT:    mul a0, a0, a1
 ; RV32-NEXT:    srai a0, a0, 31
-; RV32-NEXT:    beq a2, a0, .LBB54_2
+; RV32-NEXT:    beq a2, a0, .LBB55_2
 ; RV32-NEXT:  # %bb.1: # %overflow
 ; RV32-NEXT:    mv a0, zero
 ; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB54_2: # %continue
+; RV32-NEXT:  .LBB55_2: # %continue
 ; RV32-NEXT:    addi a0, zero, 1
 ; RV32-NEXT:    ret
 ;
@@ -3027,11 +3078,11 @@ define zeroext i1 @smulo.br.i32(i32 %v1, i32 %v2) {
 ; RV64-NEXT:    sext.w a0, a0
 ; RV64-NEXT:    mul a2, a0, a1
 ; RV64-NEXT:    mulw a0, a0, a1
-; RV64-NEXT:    beq a0, a2, .LBB54_2
+; RV64-NEXT:    beq a0, a2, .LBB55_2
 ; RV64-NEXT:  # %bb.1: # %overflow
 ; RV64-NEXT:    mv a0, zero
 ; RV64-NEXT:    ret
-; RV64-NEXT:  .LBB54_2: # %continue
+; RV64-NEXT:  .LBB55_2: # %continue
 ; RV64-NEXT:    addi a0, zero, 1
 ; RV64-NEXT:    ret
 ;
@@ -3040,11 +3091,11 @@ define zeroext i1 @smulo.br.i32(i32 %v1, i32 %v2) {
 ; RV32ZBA-NEXT:    mulh a2, a0, a1
 ; RV32ZBA-NEXT:    mul a0, a0, a1
 ; RV32ZBA-NEXT:    srai a0, a0, 31
-; RV32ZBA-NEXT:    beq a2, a0, .LBB54_2
+; RV32ZBA-NEXT:    beq a2, a0, .LBB55_2
 ; RV32ZBA-NEXT:  # %bb.1: # %overflow
 ; RV32ZBA-NEXT:    mv a0, zero
 ; RV32ZBA-NEXT:    ret
-; RV32ZBA-NEXT:  .LBB54_2: # %continue
+; RV32ZBA-NEXT:  .LBB55_2: # %continue
 ; RV32ZBA-NEXT:    addi a0, zero, 1
 ; RV32ZBA-NEXT:    ret
 ;
@@ -3054,11 +3105,11 @@ define zeroext i1 @smulo.br.i32(i32 %v1, i32 %v2) {
 ; RV64ZBA-NEXT:    sext.w a0, a0
 ; RV64ZBA-NEXT:    mul a2, a0, a1
 ; RV64ZBA-NEXT:    mulw a0, a0, a1
-; RV64ZBA-NEXT:    beq a0, a2, .LBB54_2
+; RV64ZBA-NEXT:    beq a0, a2, .LBB55_2
 ; RV64ZBA-NEXT:  # %bb.1: # %overflow
 ; RV64ZBA-NEXT:    mv a0, zero
 ; RV64ZBA-NEXT:    ret
-; RV64ZBA-NEXT:  .LBB54_2: # %continue
+; RV64ZBA-NEXT:  .LBB55_2: # %continue
 ; RV64ZBA-NEXT:    addi a0, zero, 1
 ; RV64ZBA-NEXT:    ret
 entry:
@@ -3085,13 +3136,13 @@ define zeroext i1 @smulo.br.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:    addi a4, sp, 8
 ; RV32-NEXT:    call __mulodi4@plt
 ; RV32-NEXT:    lw a0, 8(sp)
-; RV32-NEXT:    beqz a0, .LBB55_2
+; RV32-NEXT:    beqz a0, .LBB56_2
 ; RV32-NEXT:  # %bb.1: # %overflow
 ; RV32-NEXT:    mv a0, zero
-; RV32-NEXT:    j .LBB55_3
-; RV32-NEXT:  .LBB55_2: # %continue
+; RV32-NEXT:    j .LBB56_3
+; RV32-NEXT:  .LBB56_2: # %continue
 ; RV32-NEXT:    addi a0, zero, 1
-; RV32-NEXT:  .LBB55_3: # %overflow
+; RV32-NEXT:  .LBB56_3: # %overflow
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -3101,11 +3152,11 @@ define zeroext i1 @smulo.br.i64(i64 %v1, i64 %v2) {
 ; RV64-NEXT:    mulh a2, a0, a1
 ; RV64-NEXT:    mul a0, a0, a1
 ; RV64-NEXT:    srai a0, a0, 63
-; RV64-NEXT:    beq a2, a0, .LBB55_2
+; RV64-NEXT:    beq a2, a0, .LBB56_2
 ; RV64-NEXT:  # %bb.1: # %overflow
 ; RV64-NEXT:    mv a0, zero
 ; RV64-NEXT:    ret
-; RV64-NEXT:  .LBB55_2: # %continue
+; RV64-NEXT:  .LBB56_2: # %continue
 ; RV64-NEXT:    addi a0, zero, 1
 ; RV64-NEXT:    ret
 ;
@@ -3119,13 +3170,13 @@ define zeroext i1 @smulo.br.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:    addi a4, sp, 8
 ; RV32ZBA-NEXT:    call __mulodi4@plt
 ; RV32ZBA-NEXT:    lw a0, 8(sp)
-; RV32ZBA-NEXT:    beqz a0, .LBB55_2
+; RV32ZBA-NEXT:    beqz a0, .LBB56_2
 ; RV32ZBA-NEXT:  # %bb.1: # %overflow
 ; RV32ZBA-NEXT:    mv a0, zero
-; RV32ZBA-NEXT:    j .LBB55_3
-; RV32ZBA-NEXT:  .LBB55_2: # %continue
+; RV32ZBA-NEXT:    j .LBB56_3
+; RV32ZBA-NEXT:  .LBB56_2: # %continue
 ; RV32ZBA-NEXT:    addi a0, zero, 1
-; RV32ZBA-NEXT:  .LBB55_3: # %overflow
+; RV32ZBA-NEXT:  .LBB56_3: # %overflow
 ; RV32ZBA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32ZBA-NEXT:    addi sp, sp, 16
 ; RV32ZBA-NEXT:    ret
@@ -3135,11 +3186,11 @@ define zeroext i1 @smulo.br.i64(i64 %v1, i64 %v2) {
 ; RV64ZBA-NEXT:    mulh a2, a0, a1
 ; RV64ZBA-NEXT:    mul a0, a0, a1
 ; RV64ZBA-NEXT:    srai a0, a0, 63
-; RV64ZBA-NEXT:    beq a2, a0, .LBB55_2
+; RV64ZBA-NEXT:    beq a2, a0, .LBB56_2
 ; RV64ZBA-NEXT:  # %bb.1: # %overflow
 ; RV64ZBA-NEXT:    mv a0, zero
 ; RV64ZBA-NEXT:    ret
-; RV64ZBA-NEXT:  .LBB55_2: # %continue
+; RV64ZBA-NEXT:  .LBB56_2: # %continue
 ; RV64ZBA-NEXT:    addi a0, zero, 1
 ; RV64ZBA-NEXT:    ret
 entry:
@@ -3168,13 +3219,13 @@ define zeroext i1 @smulo2.br.i64(i64 %v1) {
 ; RV32-NEXT:    addi a4, sp, 8
 ; RV32-NEXT:    call __mulodi4@plt
 ; RV32-NEXT:    lw a0, 8(sp)
-; RV32-NEXT:    beqz a0, .LBB56_2
+; RV32-NEXT:    beqz a0, .LBB57_2
 ; RV32-NEXT:  # %bb.1: # %overflow
 ; RV32-NEXT:    mv a0, zero
-; RV32-NEXT:    j .LBB56_3
-; RV32-NEXT:  .LBB56_2: # %continue
+; RV32-NEXT:    j .LBB57_3
+; RV32-NEXT:  .LBB57_2: # %continue
 ; RV32-NEXT:    addi a0, zero, 1
-; RV32-NEXT:  .LBB56_3: # %overflow
+; RV32-NEXT:  .LBB57_3: # %overflow
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -3185,11 +3236,11 @@ define zeroext i1 @smulo2.br.i64(i64 %v1) {
 ; RV64-NEXT:    mulh a2, a0, a1
 ; RV64-NEXT:    mul a0, a0, a1
 ; RV64-NEXT:    srai a0, a0, 63
-; RV64-NEXT:    beq a2, a0, .LBB56_2
+; RV64-NEXT:    beq a2, a0, .LBB57_2
 ; RV64-NEXT:  # %bb.1: # %overflow
 ; RV64-NEXT:    mv a0, zero
 ; RV64-NEXT:    ret
-; RV64-NEXT:  .LBB56_2: # %continue
+; RV64-NEXT:  .LBB57_2: # %continue
 ; RV64-NEXT:    addi a0, zero, 1
 ; RV64-NEXT:    ret
 ;
@@ -3205,13 +3256,13 @@ define zeroext i1 @smulo2.br.i64(i64 %v1) {
 ; RV32ZBA-NEXT:    addi a4, sp, 8
 ; RV32ZBA-NEXT:    call __mulodi4@plt
 ; RV32ZBA-NEXT:    lw a0, 8(sp)
-; RV32ZBA-NEXT:    beqz a0, .LBB56_2
+; RV32ZBA-NEXT:    beqz a0, .LBB57_2
 ; RV32ZBA-NEXT:  # %bb.1: # %overflow
 ; RV32ZBA-NEXT:    mv a0, zero
-; RV32ZBA-NEXT:    j .LBB56_3
-; RV32ZBA-NEXT:  .LBB56_2: # %continue
+; RV32ZBA-NEXT:    j .LBB57_3
+; RV32ZBA-NEXT:  .LBB57_2: # %continue
 ; RV32ZBA-NEXT:    addi a0, zero, 1
-; RV32ZBA-NEXT:  .LBB56_3: # %overflow
+; RV32ZBA-NEXT:  .LBB57_3: # %overflow
 ; RV32ZBA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32ZBA-NEXT:    addi sp, sp, 16
 ; RV32ZBA-NEXT:    ret
@@ -3222,11 +3273,11 @@ define zeroext i1 @smulo2.br.i64(i64 %v1) {
 ; RV64ZBA-NEXT:    mulh a2, a0, a1
 ; RV64ZBA-NEXT:    mul a0, a0, a1
 ; RV64ZBA-NEXT:    srai a0, a0, 63
-; RV64ZBA-NEXT:    beq a2, a0, .LBB56_2
+; RV64ZBA-NEXT:    beq a2, a0, .LBB57_2
 ; RV64ZBA-NEXT:  # %bb.1: # %overflow
 ; RV64ZBA-NEXT:    mv a0, zero
 ; RV64ZBA-NEXT:    ret
-; RV64ZBA-NEXT:  .LBB56_2: # %continue
+; RV64ZBA-NEXT:  .LBB57_2: # %continue
 ; RV64ZBA-NEXT:    addi a0, zero, 1
 ; RV64ZBA-NEXT:    ret
 entry:
@@ -3246,11 +3297,11 @@ define zeroext i1 @umulo.br.i32(i32 %v1, i32 %v2) {
 ; RV32-LABEL: umulo.br.i32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    mulhu a0, a0, a1
-; RV32-NEXT:    beqz a0, .LBB57_2
+; RV32-NEXT:    beqz a0, .LBB58_2
 ; RV32-NEXT:  # %bb.1: # %overflow
 ; RV32-NEXT:    mv a0, zero
 ; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB57_2: # %continue
+; RV32-NEXT:  .LBB58_2: # %continue
 ; RV32-NEXT:    addi a0, zero, 1
 ; RV32-NEXT:    ret
 ;
@@ -3260,22 +3311,22 @@ define zeroext i1 @umulo.br.i32(i32 %v1, i32 %v2) {
 ; RV64-NEXT:    slli a0, a0, 32
 ; RV64-NEXT:    mulhu a0, a0, a1
 ; RV64-NEXT:    srli a0, a0, 32
-; RV64-NEXT:    beqz a0, .LBB57_2
+; RV64-NEXT:    beqz a0, .LBB58_2
 ; RV64-NEXT:  # %bb.1: # %overflow
 ; RV64-NEXT:    mv a0, zero
 ; RV64-NEXT:    ret
-; RV64-NEXT:  .LBB57_2: # %continue
+; RV64-NEXT:  .LBB58_2: # %continue
 ; RV64-NEXT:    addi a0, zero, 1
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: umulo.br.i32:
 ; RV32ZBA:       # %bb.0: # %entry
 ; RV32ZBA-NEXT:    mulhu a0, a0, a1
-; RV32ZBA-NEXT:    beqz a0, .LBB57_2
+; RV32ZBA-NEXT:    beqz a0, .LBB58_2
 ; RV32ZBA-NEXT:  # %bb.1: # %overflow
 ; RV32ZBA-NEXT:    mv a0, zero
 ; RV32ZBA-NEXT:    ret
-; RV32ZBA-NEXT:  .LBB57_2: # %continue
+; RV32ZBA-NEXT:  .LBB58_2: # %continue
 ; RV32ZBA-NEXT:    addi a0, zero, 1
 ; RV32ZBA-NEXT:    ret
 ;
@@ -3285,11 +3336,11 @@ define zeroext i1 @umulo.br.i32(i32 %v1, i32 %v2) {
 ; RV64ZBA-NEXT:    zext.w a0, a0
 ; RV64ZBA-NEXT:    mul a0, a0, a1
 ; RV64ZBA-NEXT:    srli a0, a0, 32
-; RV64ZBA-NEXT:    beqz a0, .LBB57_2
+; RV64ZBA-NEXT:    beqz a0, .LBB58_2
 ; RV64ZBA-NEXT:  # %bb.1: # %overflow
 ; RV64ZBA-NEXT:    mv a0, zero
 ; RV64ZBA-NEXT:    ret
-; RV64ZBA-NEXT:  .LBB57_2: # %continue
+; RV64ZBA-NEXT:  .LBB58_2: # %continue
 ; RV64ZBA-NEXT:    addi a0, zero, 1
 ; RV64ZBA-NEXT:    ret
 entry:
@@ -3324,22 +3375,22 @@ define zeroext i1 @umulo.br.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:    snez a0, a0
 ; RV32-NEXT:    or a0, a1, a0
 ; RV32-NEXT:    or a0, a0, a6
-; RV32-NEXT:    beqz a0, .LBB58_2
+; RV32-NEXT:    beqz a0, .LBB59_2
 ; RV32-NEXT:  # %bb.1: # %overflow
 ; RV32-NEXT:    mv a0, zero
 ; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB58_2: # %continue
+; RV32-NEXT:  .LBB59_2: # %continue
 ; RV32-NEXT:    addi a0, zero, 1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: umulo.br.i64:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    mulhu a0, a0, a1
-; RV64-NEXT:    beqz a0, .LBB58_2
+; RV64-NEXT:    beqz a0, .LBB59_2
 ; RV64-NEXT:  # %bb.1: # %overflow
 ; RV64-NEXT:    mv a0, zero
 ; RV64-NEXT:    ret
-; RV64-NEXT:  .LBB58_2: # %continue
+; RV64-NEXT:  .LBB59_2: # %continue
 ; RV64-NEXT:    addi a0, zero, 1
 ; RV64-NEXT:    ret
 ;
@@ -3361,22 +3412,22 @@ define zeroext i1 @umulo.br.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:    snez a0, a0
 ; RV32ZBA-NEXT:    or a0, a1, a0
 ; RV32ZBA-NEXT:    or a0, a0, a6
-; RV32ZBA-NEXT:    beqz a0, .LBB58_2
+; RV32ZBA-NEXT:    beqz a0, .LBB59_2
 ; RV32ZBA-NEXT:  # %bb.1: # %overflow
 ; RV32ZBA-NEXT:    mv a0, zero
 ; RV32ZBA-NEXT:    ret
-; RV32ZBA-NEXT:  .LBB58_2: # %continue
+; RV32ZBA-NEXT:  .LBB59_2: # %continue
 ; RV32ZBA-NEXT:    addi a0, zero, 1
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: umulo.br.i64:
 ; RV64ZBA:       # %bb.0: # %entry
 ; RV64ZBA-NEXT:    mulhu a0, a0, a1
-; RV64ZBA-NEXT:    beqz a0, .LBB58_2
+; RV64ZBA-NEXT:    beqz a0, .LBB59_2
 ; RV64ZBA-NEXT:  # %bb.1: # %overflow
 ; RV64ZBA-NEXT:    mv a0, zero
 ; RV64ZBA-NEXT:    ret
-; RV64ZBA-NEXT:  .LBB58_2: # %continue
+; RV64ZBA-NEXT:  .LBB59_2: # %continue
 ; RV64ZBA-NEXT:    addi a0, zero, 1
 ; RV64ZBA-NEXT:    ret
 entry:
@@ -3399,26 +3450,26 @@ define zeroext i1 @umulo2.br.i64(i64 %v1) {
 ; RV32-NEXT:    sltu a0, a2, a0
 ; RV32-NEXT:    add a2, a1, a1
 ; RV32-NEXT:    add a2, a2, a0
-; RV32-NEXT:    beq a2, a1, .LBB59_2
+; RV32-NEXT:    beq a2, a1, .LBB60_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    sltu a0, a2, a1
-; RV32-NEXT:  .LBB59_2: # %entry
-; RV32-NEXT:    beqz a0, .LBB59_4
+; RV32-NEXT:  .LBB60_2: # %entry
+; RV32-NEXT:    beqz a0, .LBB60_4
 ; RV32-NEXT:  # %bb.3: # %overflow
 ; RV32-NEXT:    mv a0, zero
 ; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB59_4: # %continue
+; RV32-NEXT:  .LBB60_4: # %continue
 ; RV32-NEXT:    addi a0, zero, 1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: umulo2.br.i64:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    add a1, a0, a0
-; RV64-NEXT:    bgeu a1, a0, .LBB59_2
+; RV64-NEXT:    bgeu a1, a0, .LBB60_2
 ; RV64-NEXT:  # %bb.1: # %overflow
 ; RV64-NEXT:    mv a0, zero
 ; RV64-NEXT:    ret
-; RV64-NEXT:  .LBB59_2: # %continue
+; RV64-NEXT:  .LBB60_2: # %continue
 ; RV64-NEXT:    addi a0, zero, 1
 ; RV64-NEXT:    ret
 ;
@@ -3428,26 +3479,26 @@ define zeroext i1 @umulo2.br.i64(i64 %v1) {
 ; RV32ZBA-NEXT:    sltu a0, a2, a0
 ; RV32ZBA-NEXT:    add a2, a1, a1
 ; RV32ZBA-NEXT:    add a2, a2, a0
-; RV32ZBA-NEXT:    beq a2, a1, .LBB59_2
+; RV32ZBA-NEXT:    beq a2, a1, .LBB60_2
 ; RV32ZBA-NEXT:  # %bb.1: # %entry
 ; RV32ZBA-NEXT:    sltu a0, a2, a1
-; RV32ZBA-NEXT:  .LBB59_2: # %entry
-; RV32ZBA-NEXT:    beqz a0, .LBB59_4
+; RV32ZBA-NEXT:  .LBB60_2: # %entry
+; RV32ZBA-NEXT:    beqz a0, .LBB60_4
 ; RV32ZBA-NEXT:  # %bb.3: # %overflow
 ; RV32ZBA-NEXT:    mv a0, zero
 ; RV32ZBA-NEXT:    ret
-; RV32ZBA-NEXT:  .LBB59_4: # %continue
+; RV32ZBA-NEXT:  .LBB60_4: # %continue
 ; RV32ZBA-NEXT:    addi a0, zero, 1
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: umulo2.br.i64:
 ; RV64ZBA:       # %bb.0: # %entry
 ; RV64ZBA-NEXT:    add a1, a0, a0
-; RV64ZBA-NEXT:    bgeu a1, a0, .LBB59_2
+; RV64ZBA-NEXT:    bgeu a1, a0, .LBB60_2
 ; RV64ZBA-NEXT:  # %bb.1: # %overflow
 ; RV64ZBA-NEXT:    mv a0, zero
 ; RV64ZBA-NEXT:    ret
-; RV64ZBA-NEXT:  .LBB59_2: # %continue
+; RV64ZBA-NEXT:  .LBB60_2: # %continue
 ; RV64ZBA-NEXT:    addi a0, zero, 1
 ; RV64ZBA-NEXT:    ret
 entry: