[llvm] bad0f98 - [ExpandMemCmp][AArch][RISCV][X86] Pre-commit tests for recognizing canonical form of (icmp sle/sge X, 0). NFC
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Fri Jan 3 10:05:58 PST 2025
Author: Craig Topper
Date: 2025-01-03T10:02:42-08:00
New Revision: bad0f98bda1ca0b8a106b14b9cce98bf1dbc15cc
URL: https://github.com/llvm/llvm-project/commit/bad0f98bda1ca0b8a106b14b9cce98bf1dbc15cc
DIFF: https://github.com/llvm/llvm-project/commit/bad0f98bda1ca0b8a106b14b9cce98bf1dbc15cc.diff
LOG: [ExpandMemCmp][AArch][RISCV][X86] Pre-commit tests for recognizing canonical form of (icmp sle/sge X, 0). NFC
Pre-commit for #121540.
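For context on the title: LLVM canonicalizes (icmp sle X, 0) to (icmp slt X, 1)
and (icmp sge X, 0) to (icmp sgt X, -1), so the new tests exercise those
shifted-constant spellings rather than sle/sge directly. A minimal
self-contained sketch of the IR pattern being pre-committed (the bodies mirror
the length4_le/length4_ge tests in the AArch64 diff below; the declare line is
added here only for completeness):

  declare i32 @memcmp(ptr, ptr, i64)

  ; memcmp(X, Y, 4) <= 0, written in canonical form
  define i1 @length4_le(ptr %X, ptr %Y) nounwind {
    %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 4) nounwind
    %c = icmp slt i32 %m, 1    ; canonical spelling of (icmp sle %m, 0)
    ret i1 %c
  }

  ; memcmp(X, Y, 4) >= 0, written in canonical form
  define i1 @length4_ge(ptr %X, ptr %Y) nounwind {
    %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 4) nounwind
    %c = icmp sgt i32 %m, -1   ; canonical spelling of (icmp sge %m, 0)
    ret i1 %c
  }

Until ExpandMemCmp recognizes these forms (#121540), they go through the
generic expansion, which is why the checked output below still materializes
the full three-way memcmp result before comparing it against 1 or -1.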
Added:
Modified:
llvm/test/CodeGen/AArch64/memcmp.ll
llvm/test/CodeGen/RISCV/memcmp.ll
llvm/test/CodeGen/X86/memcmp.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/AArch64/memcmp.ll b/llvm/test/CodeGen/AArch64/memcmp.ll
index 4f58fd74d7d508..864f38468842a1 100644
--- a/llvm/test/CodeGen/AArch64/memcmp.ll
+++ b/llvm/test/CodeGen/AArch64/memcmp.ll
@@ -257,6 +257,42 @@ define i1 @length4_gt(ptr %X, ptr %Y) nounwind {
ret i1 %c
}
+define i1 @length4_le(ptr %X, ptr %Y) nounwind {
+; CHECK-LABEL: length4_le:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr w8, [x0]
+; CHECK-NEXT: ldr w9, [x1]
+; CHECK-NEXT: rev w8, w8
+; CHECK-NEXT: rev w9, w9
+; CHECK-NEXT: cmp w8, w9
+; CHECK-NEXT: cset w8, hi
+; CHECK-NEXT: csinv w8, w8, wzr, hs
+; CHECK-NEXT: cmp w8, #1
+; CHECK-NEXT: cset w0, lt
+; CHECK-NEXT: ret
+ %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 4) nounwind
+ %c = icmp slt i32 %m, 1
+ ret i1 %c
+}
+
+define i1 @length4_ge(ptr %X, ptr %Y) nounwind {
+; CHECK-LABEL: length4_ge:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr w8, [x0]
+; CHECK-NEXT: ldr w9, [x1]
+; CHECK-NEXT: rev w8, w8
+; CHECK-NEXT: rev w9, w9
+; CHECK-NEXT: cmp w8, w9
+; CHECK-NEXT: cset w8, hi
+; CHECK-NEXT: csinv w8, w8, wzr, hs
+; CHECK-NEXT: mvn w8, w8
+; CHECK-NEXT: lsr w0, w8, #31
+; CHECK-NEXT: ret
+ %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 4) nounwind
+ %c = icmp sgt i32 %m, -1
+ ret i1 %c
+}
+
define i1 @length4_eq_const(ptr %X) nounwind {
; CHECK-LABEL: length4_eq_const:
; CHECK: // %bb.0:
@@ -371,18 +407,18 @@ define i32 @length7(ptr %X, ptr %Y) nounwind {
; CHECK-NEXT: rev w8, w8
; CHECK-NEXT: rev w9, w9
; CHECK-NEXT: cmp w8, w9
-; CHECK-NEXT: b.ne .LBB24_3
+; CHECK-NEXT: b.ne .LBB26_3
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: ldur w8, [x0, #3]
; CHECK-NEXT: ldur w9, [x1, #3]
; CHECK-NEXT: rev w8, w8
; CHECK-NEXT: rev w9, w9
; CHECK-NEXT: cmp w8, w9
-; CHECK-NEXT: b.ne .LBB24_3
+; CHECK-NEXT: b.ne .LBB26_3
; CHECK-NEXT: // %bb.2:
; CHECK-NEXT: mov w0, wzr
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB24_3: // %res_block
+; CHECK-NEXT: .LBB26_3: // %res_block
; CHECK-NEXT: cmp w8, w9
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w0, w8, hs
@@ -399,18 +435,18 @@ define i1 @length7_lt(ptr %X, ptr %Y) nounwind {
; CHECK-NEXT: rev w8, w8
; CHECK-NEXT: rev w9, w9
; CHECK-NEXT: cmp w8, w9
-; CHECK-NEXT: b.ne .LBB25_3
+; CHECK-NEXT: b.ne .LBB27_3
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: ldur w8, [x0, #3]
; CHECK-NEXT: ldur w9, [x1, #3]
; CHECK-NEXT: rev w8, w8
; CHECK-NEXT: rev w9, w9
; CHECK-NEXT: cmp w8, w9
-; CHECK-NEXT: b.ne .LBB25_3
+; CHECK-NEXT: b.ne .LBB27_3
; CHECK-NEXT: // %bb.2:
; CHECK-NEXT: lsr w0, wzr, #31
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB25_3: // %res_block
+; CHECK-NEXT: .LBB27_3: // %res_block
; CHECK-NEXT: cmp w8, w9
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w8, w8, hs
@@ -489,13 +525,13 @@ define i32 @length9(ptr %X, ptr %Y) nounwind {
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB30_2
+; CHECK-NEXT: b.ne .LBB32_2
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: ldrb w8, [x0, #8]
; CHECK-NEXT: ldrb w9, [x1, #8]
; CHECK-NEXT: sub w0, w8, w9
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB30_2: // %res_block
+; CHECK-NEXT: .LBB32_2: // %res_block
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w0, w8, hs
; CHECK-NEXT: ret
@@ -527,7 +563,7 @@ define i32 @length10(ptr %X, ptr %Y) nounwind {
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB32_3
+; CHECK-NEXT: b.ne .LBB34_3
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: ldrh w8, [x0, #8]
; CHECK-NEXT: ldrh w9, [x1, #8]
@@ -536,11 +572,11 @@ define i32 @length10(ptr %X, ptr %Y) nounwind {
; CHECK-NEXT: lsr w8, w8, #16
; CHECK-NEXT: lsr w9, w9, #16
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB32_3
+; CHECK-NEXT: b.ne .LBB34_3
; CHECK-NEXT: // %bb.2:
; CHECK-NEXT: mov w0, wzr
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB32_3: // %res_block
+; CHECK-NEXT: .LBB34_3: // %res_block
; CHECK-NEXT: cmp x8, x9
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w0, w8, hs
@@ -573,18 +609,18 @@ define i32 @length11(ptr %X, ptr %Y) nounwind {
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB34_3
+; CHECK-NEXT: b.ne .LBB36_3
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: ldur x8, [x0, #3]
; CHECK-NEXT: ldur x9, [x1, #3]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB34_3
+; CHECK-NEXT: b.ne .LBB36_3
; CHECK-NEXT: // %bb.2:
; CHECK-NEXT: mov w0, wzr
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB34_3: // %res_block
+; CHECK-NEXT: .LBB36_3: // %res_block
; CHECK-NEXT: cmp x8, x9
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w0, w8, hs
@@ -633,18 +669,18 @@ define i32 @length12(ptr %X, ptr %Y) nounwind {
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB37_3
+; CHECK-NEXT: b.ne .LBB39_3
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: ldr w8, [x0, #8]
; CHECK-NEXT: ldr w9, [x1, #8]
; CHECK-NEXT: rev w8, w8
; CHECK-NEXT: rev w9, w9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB37_3
+; CHECK-NEXT: b.ne .LBB39_3
; CHECK-NEXT: // %bb.2:
; CHECK-NEXT: mov w0, wzr
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB37_3: // %res_block
+; CHECK-NEXT: .LBB39_3: // %res_block
; CHECK-NEXT: cmp x8, x9
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w0, w8, hs
@@ -693,18 +729,18 @@ define i32 @length15(ptr %X, ptr %Y) nounwind {
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB40_3
+; CHECK-NEXT: b.ne .LBB42_3
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: ldur x8, [x0, #7]
; CHECK-NEXT: ldur x9, [x1, #7]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB40_3
+; CHECK-NEXT: b.ne .LBB42_3
; CHECK-NEXT: // %bb.2:
; CHECK-NEXT: mov w0, wzr
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB40_3: // %res_block
+; CHECK-NEXT: .LBB42_3: // %res_block
; CHECK-NEXT: cmp x8, x9
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w0, w8, hs
@@ -721,18 +757,18 @@ define i1 @length15_lt(ptr %X, ptr %Y) nounwind {
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB41_3
+; CHECK-NEXT: b.ne .LBB43_3
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: ldur x8, [x0, #7]
; CHECK-NEXT: ldur x9, [x1, #7]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB41_3
+; CHECK-NEXT: b.ne .LBB43_3
; CHECK-NEXT: // %bb.2:
; CHECK-NEXT: lsr w0, wzr, #31
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB41_3: // %res_block
+; CHECK-NEXT: .LBB43_3: // %res_block
; CHECK-NEXT: cmp x8, x9
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w8, w8, hs
@@ -753,7 +789,7 @@ define i32 @length15_const(ptr %X, ptr %Y) nounwind {
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: movk x8, #12594, lsl #48
; CHECK-NEXT: cmp x9, x8
-; CHECK-NEXT: b.ne .LBB42_3
+; CHECK-NEXT: b.ne .LBB44_3
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: mov x8, #13365 // =0x3435
; CHECK-NEXT: ldur x9, [x0, #7]
@@ -762,11 +798,11 @@ define i32 @length15_const(ptr %X, ptr %Y) nounwind {
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: movk x8, #14393, lsl #48
; CHECK-NEXT: cmp x9, x8
-; CHECK-NEXT: b.ne .LBB42_3
+; CHECK-NEXT: b.ne .LBB44_3
; CHECK-NEXT: // %bb.2:
; CHECK-NEXT: mov w0, wzr
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB42_3: // %res_block
+; CHECK-NEXT: .LBB44_3: // %res_block
; CHECK-NEXT: cmp x9, x8
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w0, w8, hs
@@ -801,7 +837,7 @@ define i1 @length15_gt_const(ptr %X, ptr %Y) nounwind {
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: movk x8, #12594, lsl #48
; CHECK-NEXT: cmp x9, x8
-; CHECK-NEXT: b.ne .LBB44_3
+; CHECK-NEXT: b.ne .LBB46_3
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: mov x8, #13365 // =0x3435
; CHECK-NEXT: ldur x9, [x0, #7]
@@ -810,15 +846,15 @@ define i1 @length15_gt_const(ptr %X, ptr %Y) nounwind {
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: movk x8, #14393, lsl #48
; CHECK-NEXT: cmp x9, x8
-; CHECK-NEXT: b.ne .LBB44_3
+; CHECK-NEXT: b.ne .LBB46_3
; CHECK-NEXT: // %bb.2:
; CHECK-NEXT: mov w8, wzr
-; CHECK-NEXT: b .LBB44_4
-; CHECK-NEXT: .LBB44_3: // %res_block
+; CHECK-NEXT: b .LBB46_4
+; CHECK-NEXT: .LBB46_3: // %res_block
; CHECK-NEXT: cmp x9, x8
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w8, w8, hs
-; CHECK-NEXT: .LBB44_4: // %endblock
+; CHECK-NEXT: .LBB46_4: // %endblock
; CHECK-NEXT: cmp w8, #0
; CHECK-NEXT: cset w0, gt
; CHECK-NEXT: ret
@@ -836,18 +872,18 @@ define i32 @length16(ptr %X, ptr %Y) nounwind {
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB45_3
+; CHECK-NEXT: b.ne .LBB47_3
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: ldr x8, [x0, #8]
; CHECK-NEXT: ldr x9, [x1, #8]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB45_3
+; CHECK-NEXT: b.ne .LBB47_3
; CHECK-NEXT: // %bb.2:
; CHECK-NEXT: mov w0, wzr
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB45_3: // %res_block
+; CHECK-NEXT: .LBB47_3: // %res_block
; CHECK-NEXT: cmp x8, x9
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w0, w8, hs
@@ -878,18 +914,18 @@ define i1 @length16_lt(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB47_3
+; CHECK-NEXT: b.ne .LBB49_3
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: ldr x8, [x0, #8]
; CHECK-NEXT: ldr x9, [x1, #8]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB47_3
+; CHECK-NEXT: b.ne .LBB49_3
; CHECK-NEXT: // %bb.2:
; CHECK-NEXT: lsr w0, wzr, #31
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB47_3: // %res_block
+; CHECK-NEXT: .LBB49_3: // %res_block
; CHECK-NEXT: cmp x8, x9
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w8, w8, hs
@@ -908,22 +944,22 @@ define i1 @length16_gt(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB48_3
+; CHECK-NEXT: b.ne .LBB50_3
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: ldr x8, [x0, #8]
; CHECK-NEXT: ldr x9, [x1, #8]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB48_3
+; CHECK-NEXT: b.ne .LBB50_3
; CHECK-NEXT: // %bb.2:
; CHECK-NEXT: mov w8, wzr
-; CHECK-NEXT: b .LBB48_4
-; CHECK-NEXT: .LBB48_3: // %res_block
+; CHECK-NEXT: b .LBB50_4
+; CHECK-NEXT: .LBB50_3: // %res_block
; CHECK-NEXT: cmp x8, x9
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w8, w8, hs
-; CHECK-NEXT: .LBB48_4: // %endblock
+; CHECK-NEXT: .LBB50_4: // %endblock
; CHECK-NEXT: cmp w8, #0
; CHECK-NEXT: cset w0, gt
; CHECK-NEXT: ret
@@ -962,25 +998,25 @@ define i32 @length24(ptr %X, ptr %Y) nounwind {
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB50_4
+; CHECK-NEXT: b.ne .LBB52_4
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: ldr x8, [x0, #8]
; CHECK-NEXT: ldr x9, [x1, #8]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB50_4
+; CHECK-NEXT: b.ne .LBB52_4
; CHECK-NEXT: // %bb.2: // %loadbb2
; CHECK-NEXT: ldr x8, [x0, #16]
; CHECK-NEXT: ldr x9, [x1, #16]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB50_4
+; CHECK-NEXT: b.ne .LBB52_4
; CHECK-NEXT: // %bb.3:
; CHECK-NEXT: mov w0, wzr
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB50_4: // %res_block
+; CHECK-NEXT: .LBB52_4: // %res_block
; CHECK-NEXT: cmp x8, x9
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w0, w8, hs
@@ -1014,25 +1050,25 @@ define i1 @length24_lt(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB52_4
+; CHECK-NEXT: b.ne .LBB54_4
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: ldr x8, [x0, #8]
; CHECK-NEXT: ldr x9, [x1, #8]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB52_4
+; CHECK-NEXT: b.ne .LBB54_4
; CHECK-NEXT: // %bb.2: // %loadbb2
; CHECK-NEXT: ldr x8, [x0, #16]
; CHECK-NEXT: ldr x9, [x1, #16]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB52_4
+; CHECK-NEXT: b.ne .LBB54_4
; CHECK-NEXT: // %bb.3:
; CHECK-NEXT: lsr w0, wzr, #31
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB52_4: // %res_block
+; CHECK-NEXT: .LBB54_4: // %res_block
; CHECK-NEXT: cmp x8, x9
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w8, w8, hs
@@ -1051,29 +1087,29 @@ define i1 @length24_gt(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB53_4
+; CHECK-NEXT: b.ne .LBB55_4
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: ldr x8, [x0, #8]
; CHECK-NEXT: ldr x9, [x1, #8]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB53_4
+; CHECK-NEXT: b.ne .LBB55_4
; CHECK-NEXT: // %bb.2: // %loadbb2
; CHECK-NEXT: ldr x8, [x0, #16]
; CHECK-NEXT: ldr x9, [x1, #16]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB53_4
+; CHECK-NEXT: b.ne .LBB55_4
; CHECK-NEXT: // %bb.3:
; CHECK-NEXT: mov w8, wzr
-; CHECK-NEXT: b .LBB53_5
-; CHECK-NEXT: .LBB53_4: // %res_block
+; CHECK-NEXT: b .LBB55_5
+; CHECK-NEXT: .LBB55_4: // %res_block
; CHECK-NEXT: cmp x8, x9
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w8, w8, hs
-; CHECK-NEXT: .LBB53_5: // %endblock
+; CHECK-NEXT: .LBB55_5: // %endblock
; CHECK-NEXT: cmp w8, #0
; CHECK-NEXT: cset w0, gt
; CHECK-NEXT: ret
@@ -1117,32 +1153,32 @@ define i32 @length31(ptr %X, ptr %Y) nounwind {
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB55_5
+; CHECK-NEXT: b.ne .LBB57_5
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: ldr x8, [x0, #8]
; CHECK-NEXT: ldr x9, [x1, #8]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB55_5
+; CHECK-NEXT: b.ne .LBB57_5
; CHECK-NEXT: // %bb.2: // %loadbb2
; CHECK-NEXT: ldr x8, [x0, #16]
; CHECK-NEXT: ldr x9, [x1, #16]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB55_5
+; CHECK-NEXT: b.ne .LBB57_5
; CHECK-NEXT: // %bb.3: // %loadbb3
; CHECK-NEXT: ldur x8, [x0, #23]
; CHECK-NEXT: ldur x9, [x1, #23]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB55_5
+; CHECK-NEXT: b.ne .LBB57_5
; CHECK-NEXT: // %bb.4:
; CHECK-NEXT: mov w0, wzr
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB55_5: // %res_block
+; CHECK-NEXT: .LBB57_5: // %res_block
; CHECK-NEXT: cmp x8, x9
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w0, w8, hs
@@ -1179,32 +1215,32 @@ define i1 @length31_lt(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB57_5
+; CHECK-NEXT: b.ne .LBB59_5
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: ldr x8, [x0, #8]
; CHECK-NEXT: ldr x9, [x1, #8]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB57_5
+; CHECK-NEXT: b.ne .LBB59_5
; CHECK-NEXT: // %bb.2: // %loadbb2
; CHECK-NEXT: ldr x8, [x0, #16]
; CHECK-NEXT: ldr x9, [x1, #16]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB57_5
+; CHECK-NEXT: b.ne .LBB59_5
; CHECK-NEXT: // %bb.3: // %loadbb3
; CHECK-NEXT: ldur x8, [x0, #23]
; CHECK-NEXT: ldur x9, [x1, #23]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB57_5
+; CHECK-NEXT: b.ne .LBB59_5
; CHECK-NEXT: // %bb.4:
; CHECK-NEXT: lsr w0, wzr, #31
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB57_5: // %res_block
+; CHECK-NEXT: .LBB59_5: // %res_block
; CHECK-NEXT: cmp x8, x9
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w8, w8, hs
@@ -1223,36 +1259,36 @@ define i1 @length31_gt(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB58_5
+; CHECK-NEXT: b.ne .LBB60_5
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: ldr x8, [x0, #8]
; CHECK-NEXT: ldr x9, [x1, #8]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB58_5
+; CHECK-NEXT: b.ne .LBB60_5
; CHECK-NEXT: // %bb.2: // %loadbb2
; CHECK-NEXT: ldr x8, [x0, #16]
; CHECK-NEXT: ldr x9, [x1, #16]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB58_5
+; CHECK-NEXT: b.ne .LBB60_5
; CHECK-NEXT: // %bb.3: // %loadbb3
; CHECK-NEXT: ldur x8, [x0, #23]
; CHECK-NEXT: ldur x9, [x1, #23]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB58_5
+; CHECK-NEXT: b.ne .LBB60_5
; CHECK-NEXT: // %bb.4:
; CHECK-NEXT: mov w8, wzr
-; CHECK-NEXT: b .LBB58_6
-; CHECK-NEXT: .LBB58_5: // %res_block
+; CHECK-NEXT: b .LBB60_6
+; CHECK-NEXT: .LBB60_5: // %res_block
; CHECK-NEXT: cmp x8, x9
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w8, w8, hs
-; CHECK-NEXT: .LBB58_6: // %endblock
+; CHECK-NEXT: .LBB60_6: // %endblock
; CHECK-NEXT: cmp w8, #0
; CHECK-NEXT: cset w0, gt
; CHECK-NEXT: ret
@@ -1322,32 +1358,32 @@ define i32 @length32(ptr %X, ptr %Y) nounwind {
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB61_5
+; CHECK-NEXT: b.ne .LBB63_5
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: ldr x8, [x0, #8]
; CHECK-NEXT: ldr x9, [x1, #8]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB61_5
+; CHECK-NEXT: b.ne .LBB63_5
; CHECK-NEXT: // %bb.2: // %loadbb2
; CHECK-NEXT: ldr x8, [x0, #16]
; CHECK-NEXT: ldr x9, [x1, #16]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB61_5
+; CHECK-NEXT: b.ne .LBB63_5
; CHECK-NEXT: // %bb.3: // %loadbb3
; CHECK-NEXT: ldr x8, [x0, #24]
; CHECK-NEXT: ldr x9, [x1, #24]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB61_5
+; CHECK-NEXT: b.ne .LBB63_5
; CHECK-NEXT: // %bb.4:
; CHECK-NEXT: mov w0, wzr
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB61_5: // %res_block
+; CHECK-NEXT: .LBB63_5: // %res_block
; CHECK-NEXT: cmp x8, x9
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w0, w8, hs
@@ -1383,32 +1419,32 @@ define i1 @length32_lt(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB63_5
+; CHECK-NEXT: b.ne .LBB65_5
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: ldr x8, [x0, #8]
; CHECK-NEXT: ldr x9, [x1, #8]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB63_5
+; CHECK-NEXT: b.ne .LBB65_5
; CHECK-NEXT: // %bb.2: // %loadbb2
; CHECK-NEXT: ldr x8, [x0, #16]
; CHECK-NEXT: ldr x9, [x1, #16]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB63_5
+; CHECK-NEXT: b.ne .LBB65_5
; CHECK-NEXT: // %bb.3: // %loadbb3
; CHECK-NEXT: ldr x8, [x0, #24]
; CHECK-NEXT: ldr x9, [x1, #24]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB63_5
+; CHECK-NEXT: b.ne .LBB65_5
; CHECK-NEXT: // %bb.4:
; CHECK-NEXT: lsr w0, wzr, #31
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB63_5: // %res_block
+; CHECK-NEXT: .LBB65_5: // %res_block
; CHECK-NEXT: cmp x8, x9
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w8, w8, hs
@@ -1427,36 +1463,36 @@ define i1 @length32_gt(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB64_5
+; CHECK-NEXT: b.ne .LBB66_5
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: ldr x8, [x0, #8]
; CHECK-NEXT: ldr x9, [x1, #8]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB64_5
+; CHECK-NEXT: b.ne .LBB66_5
; CHECK-NEXT: // %bb.2: // %loadbb2
; CHECK-NEXT: ldr x8, [x0, #16]
; CHECK-NEXT: ldr x9, [x1, #16]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB64_5
+; CHECK-NEXT: b.ne .LBB66_5
; CHECK-NEXT: // %bb.3: // %loadbb3
; CHECK-NEXT: ldr x8, [x0, #24]
; CHECK-NEXT: ldr x9, [x1, #24]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB64_5
+; CHECK-NEXT: b.ne .LBB66_5
; CHECK-NEXT: // %bb.4:
; CHECK-NEXT: mov w8, wzr
-; CHECK-NEXT: b .LBB64_6
-; CHECK-NEXT: .LBB64_5: // %res_block
+; CHECK-NEXT: b .LBB66_6
+; CHECK-NEXT: .LBB66_5: // %res_block
; CHECK-NEXT: cmp x8, x9
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w8, w8, hs
-; CHECK-NEXT: .LBB64_6: // %endblock
+; CHECK-NEXT: .LBB66_6: // %endblock
; CHECK-NEXT: cmp w8, #0
; CHECK-NEXT: cset w0, gt
; CHECK-NEXT: ret
@@ -1523,46 +1559,46 @@ define i32 @length48(ptr %X, ptr %Y) nounwind {
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB67_7
+; CHECK-NEXT: b.ne .LBB69_7
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: ldr x8, [x0, #8]
; CHECK-NEXT: ldr x9, [x1, #8]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB67_7
+; CHECK-NEXT: b.ne .LBB69_7
; CHECK-NEXT: // %bb.2: // %loadbb2
; CHECK-NEXT: ldr x8, [x0, #16]
; CHECK-NEXT: ldr x9, [x1, #16]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB67_7
+; CHECK-NEXT: b.ne .LBB69_7
; CHECK-NEXT: // %bb.3: // %loadbb3
; CHECK-NEXT: ldr x8, [x0, #24]
; CHECK-NEXT: ldr x9, [x1, #24]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB67_7
+; CHECK-NEXT: b.ne .LBB69_7
; CHECK-NEXT: // %bb.4: // %loadbb4
; CHECK-NEXT: ldr x8, [x0, #32]
; CHECK-NEXT: ldr x9, [x1, #32]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB67_7
+; CHECK-NEXT: b.ne .LBB69_7
; CHECK-NEXT: // %bb.5: // %loadbb5
; CHECK-NEXT: ldr x8, [x0, #40]
; CHECK-NEXT: ldr x9, [x1, #40]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB67_7
+; CHECK-NEXT: b.ne .LBB69_7
; CHECK-NEXT: // %bb.6:
; CHECK-NEXT: mov w0, wzr
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB67_7: // %res_block
+; CHECK-NEXT: .LBB69_7: // %res_block
; CHECK-NEXT: cmp x8, x9
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w0, w8, hs
@@ -1601,46 +1637,46 @@ define i1 @length48_lt(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB69_7
+; CHECK-NEXT: b.ne .LBB71_7
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: ldr x8, [x0, #8]
; CHECK-NEXT: ldr x9, [x1, #8]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB69_7
+; CHECK-NEXT: b.ne .LBB71_7
; CHECK-NEXT: // %bb.2: // %loadbb2
; CHECK-NEXT: ldr x8, [x0, #16]
; CHECK-NEXT: ldr x9, [x1, #16]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB69_7
+; CHECK-NEXT: b.ne .LBB71_7
; CHECK-NEXT: // %bb.3: // %loadbb3
; CHECK-NEXT: ldr x8, [x0, #24]
; CHECK-NEXT: ldr x9, [x1, #24]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB69_7
+; CHECK-NEXT: b.ne .LBB71_7
; CHECK-NEXT: // %bb.4: // %loadbb4
; CHECK-NEXT: ldr x8, [x0, #32]
; CHECK-NEXT: ldr x9, [x1, #32]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB69_7
+; CHECK-NEXT: b.ne .LBB71_7
; CHECK-NEXT: // %bb.5: // %loadbb5
; CHECK-NEXT: ldr x8, [x0, #40]
; CHECK-NEXT: ldr x9, [x1, #40]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB69_7
+; CHECK-NEXT: b.ne .LBB71_7
; CHECK-NEXT: // %bb.6:
; CHECK-NEXT: lsr w0, wzr, #31
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB69_7: // %res_block
+; CHECK-NEXT: .LBB71_7: // %res_block
; CHECK-NEXT: cmp x8, x9
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w8, w8, hs
@@ -1659,50 +1695,50 @@ define i1 @length48_gt(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB70_7
+; CHECK-NEXT: b.ne .LBB72_7
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: ldr x8, [x0, #8]
; CHECK-NEXT: ldr x9, [x1, #8]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB70_7
+; CHECK-NEXT: b.ne .LBB72_7
; CHECK-NEXT: // %bb.2: // %loadbb2
; CHECK-NEXT: ldr x8, [x0, #16]
; CHECK-NEXT: ldr x9, [x1, #16]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB70_7
+; CHECK-NEXT: b.ne .LBB72_7
; CHECK-NEXT: // %bb.3: // %loadbb3
; CHECK-NEXT: ldr x8, [x0, #24]
; CHECK-NEXT: ldr x9, [x1, #24]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB70_7
+; CHECK-NEXT: b.ne .LBB72_7
; CHECK-NEXT: // %bb.4: // %loadbb4
; CHECK-NEXT: ldr x8, [x0, #32]
; CHECK-NEXT: ldr x9, [x1, #32]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB70_7
+; CHECK-NEXT: b.ne .LBB72_7
; CHECK-NEXT: // %bb.5: // %loadbb5
; CHECK-NEXT: ldr x8, [x0, #40]
; CHECK-NEXT: ldr x9, [x1, #40]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB70_7
+; CHECK-NEXT: b.ne .LBB72_7
; CHECK-NEXT: // %bb.6:
; CHECK-NEXT: mov w8, wzr
-; CHECK-NEXT: b .LBB70_8
-; CHECK-NEXT: .LBB70_7: // %res_block
+; CHECK-NEXT: b .LBB72_8
+; CHECK-NEXT: .LBB72_7: // %res_block
; CHECK-NEXT: cmp x8, x9
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w8, w8, hs
-; CHECK-NEXT: .LBB70_8: // %endblock
+; CHECK-NEXT: .LBB72_8: // %endblock
; CHECK-NEXT: cmp w8, #0
; CHECK-NEXT: cset w0, gt
; CHECK-NEXT: ret
@@ -1780,60 +1816,60 @@ define i32 @length63(ptr %X, ptr %Y) nounwind {
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB73_9
+; CHECK-NEXT: b.ne .LBB75_9
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: ldr x8, [x0, #8]
; CHECK-NEXT: ldr x9, [x1, #8]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB73_9
+; CHECK-NEXT: b.ne .LBB75_9
; CHECK-NEXT: // %bb.2: // %loadbb2
; CHECK-NEXT: ldr x8, [x0, #16]
; CHECK-NEXT: ldr x9, [x1, #16]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB73_9
+; CHECK-NEXT: b.ne .LBB75_9
; CHECK-NEXT: // %bb.3: // %loadbb3
; CHECK-NEXT: ldr x8, [x0, #24]
; CHECK-NEXT: ldr x9, [x1, #24]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB73_9
+; CHECK-NEXT: b.ne .LBB75_9
; CHECK-NEXT: // %bb.4: // %loadbb4
; CHECK-NEXT: ldr x8, [x0, #32]
; CHECK-NEXT: ldr x9, [x1, #32]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB73_9
+; CHECK-NEXT: b.ne .LBB75_9
; CHECK-NEXT: // %bb.5: // %loadbb5
; CHECK-NEXT: ldr x8, [x0, #40]
; CHECK-NEXT: ldr x9, [x1, #40]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB73_9
+; CHECK-NEXT: b.ne .LBB75_9
; CHECK-NEXT: // %bb.6: // %loadbb6
; CHECK-NEXT: ldr x8, [x0, #48]
; CHECK-NEXT: ldr x9, [x1, #48]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB73_9
+; CHECK-NEXT: b.ne .LBB75_9
; CHECK-NEXT: // %bb.7: // %loadbb7
; CHECK-NEXT: ldur x8, [x0, #55]
; CHECK-NEXT: ldur x9, [x1, #55]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB73_9
+; CHECK-NEXT: b.ne .LBB75_9
; CHECK-NEXT: // %bb.8:
; CHECK-NEXT: mov w0, wzr
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB73_9: // %res_block
+; CHECK-NEXT: .LBB75_9: // %res_block
; CHECK-NEXT: cmp x8, x9
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w0, w8, hs
@@ -1878,60 +1914,60 @@ define i1 @length63_lt(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB75_9
+; CHECK-NEXT: b.ne .LBB77_9
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: ldr x8, [x0, #8]
; CHECK-NEXT: ldr x9, [x1, #8]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB75_9
+; CHECK-NEXT: b.ne .LBB77_9
; CHECK-NEXT: // %bb.2: // %loadbb2
; CHECK-NEXT: ldr x8, [x0, #16]
; CHECK-NEXT: ldr x9, [x1, #16]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB75_9
+; CHECK-NEXT: b.ne .LBB77_9
; CHECK-NEXT: // %bb.3: // %loadbb3
; CHECK-NEXT: ldr x8, [x0, #24]
; CHECK-NEXT: ldr x9, [x1, #24]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB75_9
+; CHECK-NEXT: b.ne .LBB77_9
; CHECK-NEXT: // %bb.4: // %loadbb4
; CHECK-NEXT: ldr x8, [x0, #32]
; CHECK-NEXT: ldr x9, [x1, #32]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB75_9
+; CHECK-NEXT: b.ne .LBB77_9
; CHECK-NEXT: // %bb.5: // %loadbb5
; CHECK-NEXT: ldr x8, [x0, #40]
; CHECK-NEXT: ldr x9, [x1, #40]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB75_9
+; CHECK-NEXT: b.ne .LBB77_9
; CHECK-NEXT: // %bb.6: // %loadbb6
; CHECK-NEXT: ldr x8, [x0, #48]
; CHECK-NEXT: ldr x9, [x1, #48]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB75_9
+; CHECK-NEXT: b.ne .LBB77_9
; CHECK-NEXT: // %bb.7: // %loadbb7
; CHECK-NEXT: ldur x8, [x0, #55]
; CHECK-NEXT: ldur x9, [x1, #55]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB75_9
+; CHECK-NEXT: b.ne .LBB77_9
; CHECK-NEXT: // %bb.8:
; CHECK-NEXT: lsr w0, wzr, #31
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB75_9: // %res_block
+; CHECK-NEXT: .LBB77_9: // %res_block
; CHECK-NEXT: cmp x8, x9
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w8, w8, hs
@@ -1950,64 +1986,64 @@ define i1 @length63_gt(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB76_9
+; CHECK-NEXT: b.ne .LBB78_9
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: ldr x8, [x0, #8]
; CHECK-NEXT: ldr x9, [x1, #8]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB76_9
+; CHECK-NEXT: b.ne .LBB78_9
; CHECK-NEXT: // %bb.2: // %loadbb2
; CHECK-NEXT: ldr x8, [x0, #16]
; CHECK-NEXT: ldr x9, [x1, #16]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB76_9
+; CHECK-NEXT: b.ne .LBB78_9
; CHECK-NEXT: // %bb.3: // %loadbb3
; CHECK-NEXT: ldr x8, [x0, #24]
; CHECK-NEXT: ldr x9, [x1, #24]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB76_9
+; CHECK-NEXT: b.ne .LBB78_9
; CHECK-NEXT: // %bb.4: // %loadbb4
; CHECK-NEXT: ldr x8, [x0, #32]
; CHECK-NEXT: ldr x9, [x1, #32]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB76_9
+; CHECK-NEXT: b.ne .LBB78_9
; CHECK-NEXT: // %bb.5: // %loadbb5
; CHECK-NEXT: ldr x8, [x0, #40]
; CHECK-NEXT: ldr x9, [x1, #40]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB76_9
+; CHECK-NEXT: b.ne .LBB78_9
; CHECK-NEXT: // %bb.6: // %loadbb6
; CHECK-NEXT: ldr x8, [x0, #48]
; CHECK-NEXT: ldr x9, [x1, #48]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB76_9
+; CHECK-NEXT: b.ne .LBB78_9
; CHECK-NEXT: // %bb.7: // %loadbb7
; CHECK-NEXT: ldur x8, [x0, #55]
; CHECK-NEXT: ldur x9, [x1, #55]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB76_9
+; CHECK-NEXT: b.ne .LBB78_9
; CHECK-NEXT: // %bb.8:
; CHECK-NEXT: mov w8, wzr
-; CHECK-NEXT: b .LBB76_10
-; CHECK-NEXT: .LBB76_9: // %res_block
+; CHECK-NEXT: b .LBB78_10
+; CHECK-NEXT: .LBB78_9: // %res_block
; CHECK-NEXT: cmp x8, x9
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w8, w8, hs
-; CHECK-NEXT: .LBB76_10: // %endblock
+; CHECK-NEXT: .LBB78_10: // %endblock
; CHECK-NEXT: cmp w8, #0
; CHECK-NEXT: cset w0, gt
; CHECK-NEXT: ret
@@ -2071,60 +2107,60 @@ define i32 @length64(ptr %X, ptr %Y) nounwind {
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB78_9
+; CHECK-NEXT: b.ne .LBB80_9
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: ldr x8, [x0, #8]
; CHECK-NEXT: ldr x9, [x1, #8]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB78_9
+; CHECK-NEXT: b.ne .LBB80_9
; CHECK-NEXT: // %bb.2: // %loadbb2
; CHECK-NEXT: ldr x8, [x0, #16]
; CHECK-NEXT: ldr x9, [x1, #16]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB78_9
+; CHECK-NEXT: b.ne .LBB80_9
; CHECK-NEXT: // %bb.3: // %loadbb3
; CHECK-NEXT: ldr x8, [x0, #24]
; CHECK-NEXT: ldr x9, [x1, #24]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB78_9
+; CHECK-NEXT: b.ne .LBB80_9
; CHECK-NEXT: // %bb.4: // %loadbb4
; CHECK-NEXT: ldr x8, [x0, #32]
; CHECK-NEXT: ldr x9, [x1, #32]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB78_9
+; CHECK-NEXT: b.ne .LBB80_9
; CHECK-NEXT: // %bb.5: // %loadbb5
; CHECK-NEXT: ldr x8, [x0, #40]
; CHECK-NEXT: ldr x9, [x1, #40]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB78_9
+; CHECK-NEXT: b.ne .LBB80_9
; CHECK-NEXT: // %bb.6: // %loadbb6
; CHECK-NEXT: ldr x8, [x0, #48]
; CHECK-NEXT: ldr x9, [x1, #48]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB78_9
+; CHECK-NEXT: b.ne .LBB80_9
; CHECK-NEXT: // %bb.7: // %loadbb7
; CHECK-NEXT: ldr x8, [x0, #56]
; CHECK-NEXT: ldr x9, [x1, #56]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB78_9
+; CHECK-NEXT: b.ne .LBB80_9
; CHECK-NEXT: // %bb.8:
; CHECK-NEXT: mov w0, wzr
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB78_9: // %res_block
+; CHECK-NEXT: .LBB80_9: // %res_block
; CHECK-NEXT: cmp x8, x9
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w0, w8, hs
@@ -2167,60 +2203,60 @@ define i1 @length64_lt(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB80_9
+; CHECK-NEXT: b.ne .LBB82_9
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: ldr x8, [x0, #8]
; CHECK-NEXT: ldr x9, [x1, #8]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB80_9
+; CHECK-NEXT: b.ne .LBB82_9
; CHECK-NEXT: // %bb.2: // %loadbb2
; CHECK-NEXT: ldr x8, [x0, #16]
; CHECK-NEXT: ldr x9, [x1, #16]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB80_9
+; CHECK-NEXT: b.ne .LBB82_9
; CHECK-NEXT: // %bb.3: // %loadbb3
; CHECK-NEXT: ldr x8, [x0, #24]
; CHECK-NEXT: ldr x9, [x1, #24]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB80_9
+; CHECK-NEXT: b.ne .LBB82_9
; CHECK-NEXT: // %bb.4: // %loadbb4
; CHECK-NEXT: ldr x8, [x0, #32]
; CHECK-NEXT: ldr x9, [x1, #32]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB80_9
+; CHECK-NEXT: b.ne .LBB82_9
; CHECK-NEXT: // %bb.5: // %loadbb5
; CHECK-NEXT: ldr x8, [x0, #40]
; CHECK-NEXT: ldr x9, [x1, #40]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB80_9
+; CHECK-NEXT: b.ne .LBB82_9
; CHECK-NEXT: // %bb.6: // %loadbb6
; CHECK-NEXT: ldr x8, [x0, #48]
; CHECK-NEXT: ldr x9, [x1, #48]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB80_9
+; CHECK-NEXT: b.ne .LBB82_9
; CHECK-NEXT: // %bb.7: // %loadbb7
; CHECK-NEXT: ldr x8, [x0, #56]
; CHECK-NEXT: ldr x9, [x1, #56]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB80_9
+; CHECK-NEXT: b.ne .LBB82_9
; CHECK-NEXT: // %bb.8:
; CHECK-NEXT: lsr w0, wzr, #31
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB80_9: // %res_block
+; CHECK-NEXT: .LBB82_9: // %res_block
; CHECK-NEXT: cmp x8, x9
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w8, w8, hs
@@ -2239,64 +2275,64 @@ define i1 @length64_gt(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB81_9
+; CHECK-NEXT: b.ne .LBB83_9
; CHECK-NEXT: // %bb.1: // %loadbb1
; CHECK-NEXT: ldr x8, [x0, #8]
; CHECK-NEXT: ldr x9, [x1, #8]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB81_9
+; CHECK-NEXT: b.ne .LBB83_9
; CHECK-NEXT: // %bb.2: // %loadbb2
; CHECK-NEXT: ldr x8, [x0, #16]
; CHECK-NEXT: ldr x9, [x1, #16]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB81_9
+; CHECK-NEXT: b.ne .LBB83_9
; CHECK-NEXT: // %bb.3: // %loadbb3
; CHECK-NEXT: ldr x8, [x0, #24]
; CHECK-NEXT: ldr x9, [x1, #24]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB81_9
+; CHECK-NEXT: b.ne .LBB83_9
; CHECK-NEXT: // %bb.4: // %loadbb4
; CHECK-NEXT: ldr x8, [x0, #32]
; CHECK-NEXT: ldr x9, [x1, #32]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB81_9
+; CHECK-NEXT: b.ne .LBB83_9
; CHECK-NEXT: // %bb.5: // %loadbb5
; CHECK-NEXT: ldr x8, [x0, #40]
; CHECK-NEXT: ldr x9, [x1, #40]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB81_9
+; CHECK-NEXT: b.ne .LBB83_9
; CHECK-NEXT: // %bb.6: // %loadbb6
; CHECK-NEXT: ldr x8, [x0, #48]
; CHECK-NEXT: ldr x9, [x1, #48]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB81_9
+; CHECK-NEXT: b.ne .LBB83_9
; CHECK-NEXT: // %bb.7: // %loadbb7
; CHECK-NEXT: ldr x8, [x0, #56]
; CHECK-NEXT: ldr x9, [x1, #56]
; CHECK-NEXT: rev x8, x8
; CHECK-NEXT: rev x9, x9
; CHECK-NEXT: cmp x8, x9
-; CHECK-NEXT: b.ne .LBB81_9
+; CHECK-NEXT: b.ne .LBB83_9
; CHECK-NEXT: // %bb.8:
; CHECK-NEXT: mov w8, wzr
-; CHECK-NEXT: b .LBB81_10
-; CHECK-NEXT: .LBB81_9: // %res_block
+; CHECK-NEXT: b .LBB83_10
+; CHECK-NEXT: .LBB83_9: // %res_block
; CHECK-NEXT: cmp x8, x9
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
; CHECK-NEXT: cneg w8, w8, hs
-; CHECK-NEXT: .LBB81_10: // %endblock
+; CHECK-NEXT: .LBB83_10: // %endblock
; CHECK-NEXT: cmp w8, #0
; CHECK-NEXT: cset w0, gt
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/memcmp.ll b/llvm/test/CodeGen/RISCV/memcmp.ll
index 831e21af43807c..5adda28acb427d 100644
--- a/llvm/test/CodeGen/RISCV/memcmp.ll
+++ b/llvm/test/CodeGen/RISCV/memcmp.ll
@@ -2710,6 +2710,216 @@ entry:
ret i1 %ret
}
+define i1 @bcmp_le_zero(ptr %s1, ptr %s2) nounwind {
+; CHECK-ALIGNED-RV32-LABEL: bcmp_le_zero:
+; CHECK-ALIGNED-RV32: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-NEXT: slti a0, a0, 1
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-LABEL: bcmp_le_zero:
+; CHECK-ALIGNED-RV64: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-NEXT: slti a0, a0, 1
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-NEXT: ret
+;
+; CHECK-ALIGNED-RV32-ZBB-LABEL: bcmp_le_zero:
+; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: slti a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_le_zero:
+; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: slti a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
+;
+; CHECK-ALIGNED-RV32-ZBKB-LABEL: bcmp_le_zero:
+; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: slti a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_le_zero:
+; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: slti a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
+;
+; CHECK-ALIGNED-RV32-V-LABEL: bcmp_le_zero:
+; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-V-NEXT: slti a0, a0, 1
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-V-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-V-LABEL: bcmp_le_zero:
+; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-V-NEXT: slti a0, a0, 1
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-V-NEXT: ret
+;
+; CHECK-UNALIGNED-LABEL: bcmp_le_zero:
+; CHECK-UNALIGNED: # %bb.0: # %entry
+; CHECK-UNALIGNED-NEXT: lw a0, 0(a0)
+; CHECK-UNALIGNED-NEXT: lw a1, 0(a1)
+; CHECK-UNALIGNED-NEXT: xor a0, a0, a1
+; CHECK-UNALIGNED-NEXT: snez a0, a0
+; CHECK-UNALIGNED-NEXT: slti a0, a0, 1
+; CHECK-UNALIGNED-NEXT: ret
+entry:
+ %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iXLen 4)
+ %ret = icmp slt i32 %bcmp, 1
+ ret i1 %ret
+}
+
+define i1 @bcmp_ge_zero(ptr %s1, ptr %s2) nounwind {
+; CHECK-ALIGNED-RV32-LABEL: bcmp_ge_zero:
+; CHECK-ALIGNED-RV32: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV32-NEXT: xori a0, a0, 1
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-LABEL: bcmp_ge_zero:
+; CHECK-ALIGNED-RV64: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV64-NEXT: xori a0, a0, 1
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-NEXT: ret
+;
+; CHECK-ALIGNED-RV32-ZBB-LABEL: bcmp_ge_zero:
+; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: xori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_ge_zero:
+; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: xori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
+;
+; CHECK-ALIGNED-RV32-ZBKB-LABEL: bcmp_ge_zero:
+; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: xori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_ge_zero:
+; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: xori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
+;
+; CHECK-ALIGNED-RV32-V-LABEL: bcmp_ge_zero:
+; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-V-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV32-V-NEXT: xori a0, a0, 1
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-V-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-V-LABEL: bcmp_ge_zero:
+; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-V-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV64-V-NEXT: xori a0, a0, 1
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-V-NEXT: ret
+;
+; CHECK-UNALIGNED-LABEL: bcmp_ge_zero:
+; CHECK-UNALIGNED: # %bb.0: # %entry
+; CHECK-UNALIGNED-NEXT: li a0, 1
+; CHECK-UNALIGNED-NEXT: ret
+entry:
+ %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iXLen 4)
+ %ret = icmp sgt i32 %bcmp, -1
+ ret i1 %ret
+}
+
define i32 @memcmp_size_0(ptr %s1, ptr %s2) nounwind {
; CHECK-LABEL: memcmp_size_0:
; CHECK: # %bb.0: # %entry
@@ -3517,13 +3727,13 @@ define i32 @memcmp_size_5(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a3, 0(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB26_2
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB28_2
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lbu a0, 4(a0)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lbu a1, 4(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: sub a0, a0, a1
; CHECK-UNALIGNED-RV32-ZBB-NEXT: ret
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: .LBB26_2: # %res_block
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: .LBB28_2: # %res_block
; CHECK-UNALIGNED-RV32-ZBB-NEXT: sltu a0, a2, a3
; CHECK-UNALIGNED-RV32-ZBB-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV32-ZBB-NEXT: ori a0, a0, 1
@@ -3552,13 +3762,13 @@ define i32 @memcmp_size_5(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a3, 0(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB26_2
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB28_2
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lbu a0, 4(a0)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lbu a1, 4(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: sub a0, a0, a1
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: ret
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: .LBB26_2: # %res_block
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: .LBB28_2: # %res_block
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: sltu a0, a2, a3
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: ori a0, a0, 1
@@ -3710,7 +3920,7 @@ define i32 @memcmp_size_6(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a3, 0(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB27_3
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB29_3
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lh a0, 4(a0)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lh a1, 4(a1)
@@ -3718,11 +3928,11 @@ define i32 @memcmp_size_6(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a1
; CHECK-UNALIGNED-RV32-ZBB-NEXT: srli a2, a2, 16
; CHECK-UNALIGNED-RV32-ZBB-NEXT: srli a3, a3, 16
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB27_3
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB29_3
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.2:
; CHECK-UNALIGNED-RV32-ZBB-NEXT: li a0, 0
; CHECK-UNALIGNED-RV32-ZBB-NEXT: ret
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: .LBB27_3: # %res_block
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: .LBB29_3: # %res_block
; CHECK-UNALIGNED-RV32-ZBB-NEXT: sltu a0, a2, a3
; CHECK-UNALIGNED-RV32-ZBB-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV32-ZBB-NEXT: ori a0, a0, 1
@@ -3751,7 +3961,7 @@ define i32 @memcmp_size_6(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a3, 0(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB27_3
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB29_3
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lh a0, 4(a0)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lh a1, 4(a1)
@@ -3759,11 +3969,11 @@ define i32 @memcmp_size_6(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a1
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: srli a2, a2, 16
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: srli a3, a3, 16
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB27_3
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB29_3
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.2:
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: li a0, 0
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: ret
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: .LBB27_3: # %res_block
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: .LBB29_3: # %res_block
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: sltu a0, a2, a3
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: ori a0, a0, 1
@@ -3915,17 +4125,17 @@ define i32 @memcmp_size_7(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a3, 0(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB28_3
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB30_3
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a0, 3(a0)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a1, 3(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a2, a0
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a1
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB28_3
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB30_3
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.2:
; CHECK-UNALIGNED-RV32-ZBB-NEXT: li a0, 0
; CHECK-UNALIGNED-RV32-ZBB-NEXT: ret
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: .LBB28_3: # %res_block
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: .LBB30_3: # %res_block
; CHECK-UNALIGNED-RV32-ZBB-NEXT: sltu a0, a2, a3
; CHECK-UNALIGNED-RV32-ZBB-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV32-ZBB-NEXT: ori a0, a0, 1
@@ -3939,7 +4149,7 @@ define i32 @memcmp_size_7(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
; CHECK-UNALIGNED-RV64-ZBB-NEXT: srli a2, a2, 32
; CHECK-UNALIGNED-RV64-ZBB-NEXT: srli a3, a3, 32
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB28_3
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB30_3
; CHECK-UNALIGNED-RV64-ZBB-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV64-ZBB-NEXT: lw a0, 3(a0)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: lw a1, 3(a1)
@@ -3947,11 +4157,11 @@ define i32 @memcmp_size_7(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a3, a1
; CHECK-UNALIGNED-RV64-ZBB-NEXT: srli a2, a2, 32
; CHECK-UNALIGNED-RV64-ZBB-NEXT: srli a3, a3, 32
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB28_3
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB30_3
; CHECK-UNALIGNED-RV64-ZBB-NEXT: # %bb.2:
; CHECK-UNALIGNED-RV64-ZBB-NEXT: li a0, 0
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ret
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: .LBB28_3: # %res_block
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: .LBB30_3: # %res_block
; CHECK-UNALIGNED-RV64-ZBB-NEXT: sltu a0, a2, a3
; CHECK-UNALIGNED-RV64-ZBB-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ori a0, a0, 1
@@ -3963,17 +4173,17 @@ define i32 @memcmp_size_7(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a3, 0(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB28_3
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB30_3
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a0, 3(a0)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a1, 3(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a2, a0
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a1
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB28_3
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB30_3
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.2:
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: li a0, 0
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: ret
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: .LBB28_3: # %res_block
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: .LBB30_3: # %res_block
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: sltu a0, a2, a3
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: ori a0, a0, 1
@@ -3987,7 +4197,7 @@ define i32 @memcmp_size_7(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: srli a2, a2, 32
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: srli a3, a3, 32
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB28_3
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB30_3
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: lw a0, 3(a0)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: lw a1, 3(a1)
@@ -3995,11 +4205,11 @@ define i32 @memcmp_size_7(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a3, a1
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: srli a2, a2, 32
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: srli a3, a3, 32
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB28_3
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB30_3
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.2:
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: li a0, 0
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ret
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: .LBB28_3: # %res_block
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: .LBB30_3: # %res_block
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: sltu a0, a2, a3
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ori a0, a0, 1
@@ -4136,17 +4346,17 @@ define i32 @memcmp_size_8(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a3, 0(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB29_3
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB31_3
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a0, 4(a0)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a1, 4(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a2, a0
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a1
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB29_3
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB31_3
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.2:
; CHECK-UNALIGNED-RV32-ZBB-NEXT: li a0, 0
; CHECK-UNALIGNED-RV32-ZBB-NEXT: ret
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: .LBB29_3: # %res_block
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: .LBB31_3: # %res_block
; CHECK-UNALIGNED-RV32-ZBB-NEXT: sltu a0, a2, a3
; CHECK-UNALIGNED-RV32-ZBB-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV32-ZBB-NEXT: ori a0, a0, 1
@@ -4169,17 +4379,17 @@ define i32 @memcmp_size_8(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a3, 0(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB29_3
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB31_3
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a0, 4(a0)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a1, 4(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a2, a0
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a1
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB29_3
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB31_3
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.2:
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: li a0, 0
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: ret
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: .LBB29_3: # %res_block
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: .LBB31_3: # %res_block
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: sltu a0, a2, a3
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: ori a0, a0, 1
@@ -4327,29 +4537,29 @@ define i32 @memcmp_size_15(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a3, 0(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB30_5
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB32_5
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a2, 4(a0)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a3, 4(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB30_5
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB32_5
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.2: # %loadbb2
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a2, 8(a0)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a3, 8(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB30_5
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB32_5
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.3: # %loadbb3
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a0, 11(a0)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a1, 11(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a2, a0
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a1
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB30_5
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB32_5
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.4:
; CHECK-UNALIGNED-RV32-ZBB-NEXT: li a0, 0
; CHECK-UNALIGNED-RV32-ZBB-NEXT: ret
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: .LBB30_5: # %res_block
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: .LBB32_5: # %res_block
; CHECK-UNALIGNED-RV32-ZBB-NEXT: sltu a0, a2, a3
; CHECK-UNALIGNED-RV32-ZBB-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV32-ZBB-NEXT: ori a0, a0, 1
@@ -4361,17 +4571,17 @@ define i32 @memcmp_size_15(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a3, 0(a1)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB30_3
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB32_3
; CHECK-UNALIGNED-RV64-ZBB-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a0, 7(a0)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a1, 7(a1)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a2, a0
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a3, a1
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB30_3
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB32_3
; CHECK-UNALIGNED-RV64-ZBB-NEXT: # %bb.2:
; CHECK-UNALIGNED-RV64-ZBB-NEXT: li a0, 0
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ret
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: .LBB30_3: # %res_block
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: .LBB32_3: # %res_block
; CHECK-UNALIGNED-RV64-ZBB-NEXT: sltu a0, a2, a3
; CHECK-UNALIGNED-RV64-ZBB-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ori a0, a0, 1
@@ -4383,29 +4593,29 @@ define i32 @memcmp_size_15(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a3, 0(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB30_5
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB32_5
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a2, 4(a0)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a3, 4(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB30_5
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB32_5
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.2: # %loadbb2
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a2, 8(a0)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a3, 8(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB30_5
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB32_5
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.3: # %loadbb3
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a0, 11(a0)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a1, 11(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a2, a0
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a1
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB30_5
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB32_5
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.4:
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: li a0, 0
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: ret
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: .LBB30_5: # %res_block
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: .LBB32_5: # %res_block
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: sltu a0, a2, a3
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: ori a0, a0, 1
@@ -4417,17 +4627,17 @@ define i32 @memcmp_size_15(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a3, 0(a1)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB30_3
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB32_3
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a0, 7(a0)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a1, 7(a1)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a2, a0
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a3, a1
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB30_3
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB32_3
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.2:
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: li a0, 0
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ret
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: .LBB30_3: # %res_block
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: .LBB32_3: # %res_block
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: sltu a0, a2, a3
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ori a0, a0, 1
@@ -4564,29 +4774,29 @@ define i32 @memcmp_size_16(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a3, 0(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB31_5
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB33_5
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a2, 4(a0)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a3, 4(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB31_5
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB33_5
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.2: # %loadbb2
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a2, 8(a0)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a3, 8(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB31_5
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB33_5
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.3: # %loadbb3
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a0, 12(a0)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a1, 12(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a2, a0
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a1
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB31_5
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB33_5
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.4:
; CHECK-UNALIGNED-RV32-ZBB-NEXT: li a0, 0
; CHECK-UNALIGNED-RV32-ZBB-NEXT: ret
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: .LBB31_5: # %res_block
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: .LBB33_5: # %res_block
; CHECK-UNALIGNED-RV32-ZBB-NEXT: sltu a0, a2, a3
; CHECK-UNALIGNED-RV32-ZBB-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV32-ZBB-NEXT: ori a0, a0, 1
@@ -4598,17 +4808,17 @@ define i32 @memcmp_size_16(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a3, 0(a1)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB31_3
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB33_3
; CHECK-UNALIGNED-RV64-ZBB-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a0, 8(a0)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a1, 8(a1)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a2, a0
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a3, a1
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB31_3
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB33_3
; CHECK-UNALIGNED-RV64-ZBB-NEXT: # %bb.2:
; CHECK-UNALIGNED-RV64-ZBB-NEXT: li a0, 0
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ret
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: .LBB31_3: # %res_block
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: .LBB33_3: # %res_block
; CHECK-UNALIGNED-RV64-ZBB-NEXT: sltu a0, a2, a3
; CHECK-UNALIGNED-RV64-ZBB-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ori a0, a0, 1
@@ -4620,29 +4830,29 @@ define i32 @memcmp_size_16(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a3, 0(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB31_5
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB33_5
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a2, 4(a0)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a3, 4(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB31_5
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB33_5
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.2: # %loadbb2
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a2, 8(a0)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a3, 8(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB31_5
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB33_5
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.3: # %loadbb3
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a0, 12(a0)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a1, 12(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a2, a0
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a1
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB31_5
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB33_5
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.4:
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: li a0, 0
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: ret
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: .LBB31_5: # %res_block
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: .LBB33_5: # %res_block
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: sltu a0, a2, a3
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: ori a0, a0, 1
@@ -4654,17 +4864,17 @@ define i32 @memcmp_size_16(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a3, 0(a1)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB31_3
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB33_3
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a0, 8(a0)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a1, 8(a1)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a2, a0
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a3, a1
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB31_3
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB33_3
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.2:
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: li a0, 0
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ret
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: .LBB31_3: # %res_block
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: .LBB33_3: # %res_block
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: sltu a0, a2, a3
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ori a0, a0, 1
@@ -4801,53 +5011,53 @@ define i32 @memcmp_size_31(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a3, 0(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB32_9
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB34_9
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a2, 4(a0)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a3, 4(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB32_9
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB34_9
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.2: # %loadbb2
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a2, 8(a0)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a3, 8(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB32_9
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB34_9
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.3: # %loadbb3
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a2, 12(a0)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a3, 12(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB32_9
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB34_9
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.4: # %loadbb4
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a2, 16(a0)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a3, 16(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB32_9
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB34_9
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.5: # %loadbb5
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a2, 20(a0)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a3, 20(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB32_9
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB34_9
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.6: # %loadbb6
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a2, 24(a0)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a3, 24(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB32_9
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB34_9
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.7: # %loadbb7
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a0, 27(a0)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a1, 27(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a2, a0
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a1
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB32_9
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB34_9
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.8:
; CHECK-UNALIGNED-RV32-ZBB-NEXT: li a0, 0
; CHECK-UNALIGNED-RV32-ZBB-NEXT: ret
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: .LBB32_9: # %res_block
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: .LBB34_9: # %res_block
; CHECK-UNALIGNED-RV32-ZBB-NEXT: sltu a0, a2, a3
; CHECK-UNALIGNED-RV32-ZBB-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV32-ZBB-NEXT: ori a0, a0, 1
@@ -4859,29 +5069,29 @@ define i32 @memcmp_size_31(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a3, 0(a1)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB32_5
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB34_5
; CHECK-UNALIGNED-RV64-ZBB-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a2, 8(a0)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a3, 8(a1)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB32_5
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB34_5
; CHECK-UNALIGNED-RV64-ZBB-NEXT: # %bb.2: # %loadbb2
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a2, 16(a0)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a3, 16(a1)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB32_5
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB34_5
; CHECK-UNALIGNED-RV64-ZBB-NEXT: # %bb.3: # %loadbb3
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a0, 23(a0)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a1, 23(a1)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a2, a0
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a3, a1
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB32_5
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB34_5
; CHECK-UNALIGNED-RV64-ZBB-NEXT: # %bb.4:
; CHECK-UNALIGNED-RV64-ZBB-NEXT: li a0, 0
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ret
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: .LBB32_5: # %res_block
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: .LBB34_5: # %res_block
; CHECK-UNALIGNED-RV64-ZBB-NEXT: sltu a0, a2, a3
; CHECK-UNALIGNED-RV64-ZBB-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ori a0, a0, 1
@@ -4893,53 +5103,53 @@ define i32 @memcmp_size_31(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a3, 0(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB32_9
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB34_9
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a2, 4(a0)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a3, 4(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB32_9
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB34_9
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.2: # %loadbb2
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a2, 8(a0)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a3, 8(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB32_9
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB34_9
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.3: # %loadbb3
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a2, 12(a0)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a3, 12(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB32_9
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB34_9
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.4: # %loadbb4
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a2, 16(a0)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a3, 16(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB32_9
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB34_9
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.5: # %loadbb5
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a2, 20(a0)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a3, 20(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB32_9
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB34_9
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.6: # %loadbb6
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a2, 24(a0)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a3, 24(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB32_9
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB34_9
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.7: # %loadbb7
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a0, 27(a0)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a1, 27(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a2, a0
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a1
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB32_9
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB34_9
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.8:
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: li a0, 0
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: ret
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: .LBB32_9: # %res_block
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: .LBB34_9: # %res_block
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: sltu a0, a2, a3
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: ori a0, a0, 1
@@ -4951,29 +5161,29 @@ define i32 @memcmp_size_31(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a3, 0(a1)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB32_5
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB34_5
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a2, 8(a0)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a3, 8(a1)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB32_5
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB34_5
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.2: # %loadbb2
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a2, 16(a0)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a3, 16(a1)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB32_5
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB34_5
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.3: # %loadbb3
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a0, 23(a0)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a1, 23(a1)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a2, a0
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a3, a1
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB32_5
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB34_5
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.4:
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: li a0, 0
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ret
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: .LBB32_5: # %res_block
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: .LBB34_5: # %res_block
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: sltu a0, a2, a3
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ori a0, a0, 1
@@ -5110,53 +5320,53 @@ define i32 @memcmp_size_32(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a3, 0(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB33_9
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB35_9
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a2, 4(a0)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a3, 4(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB33_9
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB35_9
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.2: # %loadbb2
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a2, 8(a0)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a3, 8(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB33_9
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB35_9
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.3: # %loadbb3
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a2, 12(a0)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a3, 12(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB33_9
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB35_9
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.4: # %loadbb4
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a2, 16(a0)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a3, 16(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB33_9
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB35_9
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.5: # %loadbb5
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a2, 20(a0)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a3, 20(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB33_9
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB35_9
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.6: # %loadbb6
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a2, 24(a0)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a3, 24(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB33_9
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB35_9
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.7: # %loadbb7
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a0, 28(a0)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a1, 28(a1)
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a2, a0
; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a3, a1
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB33_9
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB35_9
; CHECK-UNALIGNED-RV32-ZBB-NEXT: # %bb.8:
; CHECK-UNALIGNED-RV32-ZBB-NEXT: li a0, 0
; CHECK-UNALIGNED-RV32-ZBB-NEXT: ret
-; CHECK-UNALIGNED-RV32-ZBB-NEXT: .LBB33_9: # %res_block
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: .LBB35_9: # %res_block
; CHECK-UNALIGNED-RV32-ZBB-NEXT: sltu a0, a2, a3
; CHECK-UNALIGNED-RV32-ZBB-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV32-ZBB-NEXT: ori a0, a0, 1
@@ -5168,29 +5378,29 @@ define i32 @memcmp_size_32(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a3, 0(a1)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB33_5
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB35_5
; CHECK-UNALIGNED-RV64-ZBB-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a2, 8(a0)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a3, 8(a1)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB33_5
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB35_5
; CHECK-UNALIGNED-RV64-ZBB-NEXT: # %bb.2: # %loadbb2
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a2, 16(a0)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a3, 16(a1)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB33_5
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB35_5
; CHECK-UNALIGNED-RV64-ZBB-NEXT: # %bb.3: # %loadbb3
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a0, 24(a0)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a1, 24(a1)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a2, a0
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a3, a1
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB33_5
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB35_5
; CHECK-UNALIGNED-RV64-ZBB-NEXT: # %bb.4:
; CHECK-UNALIGNED-RV64-ZBB-NEXT: li a0, 0
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ret
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: .LBB33_5: # %res_block
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: .LBB35_5: # %res_block
; CHECK-UNALIGNED-RV64-ZBB-NEXT: sltu a0, a2, a3
; CHECK-UNALIGNED-RV64-ZBB-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ori a0, a0, 1
@@ -5202,53 +5412,53 @@ define i32 @memcmp_size_32(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a3, 0(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB33_9
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB35_9
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a2, 4(a0)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a3, 4(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB33_9
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB35_9
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.2: # %loadbb2
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a2, 8(a0)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a3, 8(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB33_9
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB35_9
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.3: # %loadbb3
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a2, 12(a0)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a3, 12(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB33_9
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB35_9
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.4: # %loadbb4
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a2, 16(a0)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a3, 16(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB33_9
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB35_9
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.5: # %loadbb5
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a2, 20(a0)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a3, 20(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB33_9
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB35_9
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.6: # %loadbb6
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a2, 24(a0)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a3, 24(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB33_9
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB35_9
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.7: # %loadbb7
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a0, 28(a0)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a1, 28(a1)
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a2, a0
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a3, a1
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB33_9
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB35_9
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: # %bb.8:
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: li a0, 0
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: ret
-; CHECK-UNALIGNED-RV32-ZBKB-NEXT: .LBB33_9: # %res_block
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: .LBB35_9: # %res_block
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: sltu a0, a2, a3
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV32-ZBKB-NEXT: ori a0, a0, 1
@@ -5260,29 +5470,29 @@ define i32 @memcmp_size_32(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a3, 0(a1)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB33_5
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB35_5
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a2, 8(a0)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a3, 8(a1)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB33_5
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB35_5
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.2: # %loadbb2
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a2, 16(a0)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a3, 16(a1)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB33_5
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB35_5
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.3: # %loadbb3
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a0, 24(a0)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a1, 24(a1)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a2, a0
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a3, a1
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB33_5
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB35_5
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.4:
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: li a0, 0
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ret
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: .LBB33_5: # %res_block
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: .LBB35_5: # %res_block
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: sltu a0, a2, a3
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ori a0, a0, 1
@@ -5379,53 +5589,53 @@ define i32 @memcmp_size_63(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a3, 0(a1)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB34_9
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB36_9
; CHECK-UNALIGNED-RV64-ZBB-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a2, 8(a0)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a3, 8(a1)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB34_9
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB36_9
; CHECK-UNALIGNED-RV64-ZBB-NEXT: # %bb.2: # %loadbb2
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a2, 16(a0)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a3, 16(a1)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB34_9
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB36_9
; CHECK-UNALIGNED-RV64-ZBB-NEXT: # %bb.3: # %loadbb3
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a2, 24(a0)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a3, 24(a1)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB34_9
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB36_9
; CHECK-UNALIGNED-RV64-ZBB-NEXT: # %bb.4: # %loadbb4
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a2, 32(a0)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a3, 32(a1)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB34_9
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB36_9
; CHECK-UNALIGNED-RV64-ZBB-NEXT: # %bb.5: # %loadbb5
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a2, 40(a0)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a3, 40(a1)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB34_9
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB36_9
; CHECK-UNALIGNED-RV64-ZBB-NEXT: # %bb.6: # %loadbb6
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a2, 48(a0)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a3, 48(a1)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB34_9
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB36_9
; CHECK-UNALIGNED-RV64-ZBB-NEXT: # %bb.7: # %loadbb7
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a0, 55(a0)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a1, 55(a1)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a2, a0
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a3, a1
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB34_9
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB36_9
; CHECK-UNALIGNED-RV64-ZBB-NEXT: # %bb.8:
; CHECK-UNALIGNED-RV64-ZBB-NEXT: li a0, 0
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ret
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: .LBB34_9: # %res_block
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: .LBB36_9: # %res_block
; CHECK-UNALIGNED-RV64-ZBB-NEXT: sltu a0, a2, a3
; CHECK-UNALIGNED-RV64-ZBB-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ori a0, a0, 1
@@ -5437,53 +5647,53 @@ define i32 @memcmp_size_63(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a3, 0(a1)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB34_9
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB36_9
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a2, 8(a0)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a3, 8(a1)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB34_9
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB36_9
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.2: # %loadbb2
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a2, 16(a0)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a3, 16(a1)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB34_9
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB36_9
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.3: # %loadbb3
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a2, 24(a0)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a3, 24(a1)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB34_9
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB36_9
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.4: # %loadbb4
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a2, 32(a0)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a3, 32(a1)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB34_9
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB36_9
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.5: # %loadbb5
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a2, 40(a0)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a3, 40(a1)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB34_9
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB36_9
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.6: # %loadbb6
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a2, 48(a0)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a3, 48(a1)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB34_9
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB36_9
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.7: # %loadbb7
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a0, 55(a0)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a1, 55(a1)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a2, a0
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a3, a1
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB34_9
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB36_9
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.8:
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: li a0, 0
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ret
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: .LBB34_9: # %res_block
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: .LBB36_9: # %res_block
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: sltu a0, a2, a3
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ori a0, a0, 1
@@ -5570,53 +5780,53 @@ define i32 @memcmp_size_64(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a3, 0(a1)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB35_9
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB37_9
; CHECK-UNALIGNED-RV64-ZBB-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a2, 8(a0)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a3, 8(a1)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB35_9
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB37_9
; CHECK-UNALIGNED-RV64-ZBB-NEXT: # %bb.2: # %loadbb2
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a2, 16(a0)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a3, 16(a1)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB35_9
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB37_9
; CHECK-UNALIGNED-RV64-ZBB-NEXT: # %bb.3: # %loadbb3
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a2, 24(a0)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a3, 24(a1)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB35_9
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB37_9
; CHECK-UNALIGNED-RV64-ZBB-NEXT: # %bb.4: # %loadbb4
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a2, 32(a0)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a3, 32(a1)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB35_9
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB37_9
; CHECK-UNALIGNED-RV64-ZBB-NEXT: # %bb.5: # %loadbb5
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a2, 40(a0)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a3, 40(a1)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB35_9
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB37_9
; CHECK-UNALIGNED-RV64-ZBB-NEXT: # %bb.6: # %loadbb6
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a2, 48(a0)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a3, 48(a1)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB35_9
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB37_9
; CHECK-UNALIGNED-RV64-ZBB-NEXT: # %bb.7: # %loadbb7
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a0, 56(a0)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ld a1, 56(a1)
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a2, a0
; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a3, a1
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB35_9
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB37_9
; CHECK-UNALIGNED-RV64-ZBB-NEXT: # %bb.8:
; CHECK-UNALIGNED-RV64-ZBB-NEXT: li a0, 0
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ret
-; CHECK-UNALIGNED-RV64-ZBB-NEXT: .LBB35_9: # %res_block
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: .LBB37_9: # %res_block
; CHECK-UNALIGNED-RV64-ZBB-NEXT: sltu a0, a2, a3
; CHECK-UNALIGNED-RV64-ZBB-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV64-ZBB-NEXT: ori a0, a0, 1
@@ -5628,53 +5838,53 @@ define i32 @memcmp_size_64(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a3, 0(a1)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB35_9
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB37_9
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a2, 8(a0)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a3, 8(a1)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB35_9
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB37_9
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.2: # %loadbb2
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a2, 16(a0)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a3, 16(a1)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB35_9
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB37_9
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.3: # %loadbb3
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a2, 24(a0)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a3, 24(a1)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB35_9
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB37_9
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.4: # %loadbb4
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a2, 32(a0)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a3, 32(a1)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB35_9
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB37_9
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.5: # %loadbb5
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a2, 40(a0)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a3, 40(a1)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB35_9
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB37_9
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.6: # %loadbb6
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a2, 48(a0)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a3, 48(a1)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB35_9
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB37_9
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.7: # %loadbb7
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a0, 56(a0)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ld a1, 56(a1)
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a2, a0
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a3, a1
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB35_9
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB37_9
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.8:
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: li a0, 0
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ret
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: .LBB35_9: # %res_block
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: .LBB37_9: # %res_block
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: sltu a0, a2, a3
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ori a0, a0, 1
@@ -6336,5 +6546,401 @@ entry:
%ret = icmp sgt i32 %memcmp, 0
ret i1 %ret
}
+
+define i1 @memcmp_le_zero(ptr %s1, ptr %s2) nounwind {
+; CHECK-ALIGNED-RV32-LABEL: memcmp_le_zero:
+; CHECK-ALIGNED-RV32: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-NEXT: slti a0, a0, 1
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-LABEL: memcmp_le_zero:
+; CHECK-ALIGNED-RV64: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-NEXT: slti a0, a0, 1
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-NEXT: ret
+;
+; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_le_zero:
+; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: slti a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_le_zero:
+; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: slti a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
+;
+; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_le_zero:
+; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: slti a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_le_zero:
+; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: slti a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
+;
+; CHECK-ALIGNED-RV32-V-LABEL: memcmp_le_zero:
+; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-V-NEXT: slti a0, a0, 1
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-V-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-V-LABEL: memcmp_le_zero:
+; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-V-NEXT: slti a0, a0, 1
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-V-NEXT: ret
+;
+; CHECK-UNALIGNED-RV32-LABEL: memcmp_le_zero:
+; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV32-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-NEXT: slti a0, a0, 1
+; CHECK-UNALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, 16
+; CHECK-UNALIGNED-RV32-NEXT: ret
+;
+; CHECK-UNALIGNED-RV64-LABEL: memcmp_le_zero:
+; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: slti a0, a0, 1
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
+; CHECK-UNALIGNED-RV64-NEXT: ret
+;
+; CHECK-UNALIGNED-RV32-ZBB-LABEL: memcmp_le_zero:
+; CHECK-UNALIGNED-RV32-ZBB: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a0, 0(a0)
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a1, 0(a1)
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a0, a0
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a1, a1
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: sltu a2, a0, a1
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: sltu a0, a1, a0
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: sub a0, a0, a2
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: slti a0, a0, 1
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: ret
+;
+; CHECK-UNALIGNED-RV64-ZBB-LABEL: memcmp_le_zero:
+; CHECK-UNALIGNED-RV64-ZBB: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: lw a0, 0(a0)
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: lw a1, 0(a1)
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a0, a0
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a1, a1
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: srli a0, a0, 32
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: srli a1, a1, 32
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: sltu a2, a0, a1
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: sltu a0, a1, a0
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: sub a0, a0, a2
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: slti a0, a0, 1
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: ret
+;
+; CHECK-UNALIGNED-RV32-ZBKB-LABEL: memcmp_le_zero:
+; CHECK-UNALIGNED-RV32-ZBKB: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a0, 0(a0)
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a1, 0(a1)
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a0, a0
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a1, a1
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: sltu a2, a0, a1
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: sltu a0, a1, a0
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: sub a0, a0, a2
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: slti a0, a0, 1
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: ret
+;
+; CHECK-UNALIGNED-RV64-ZBKB-LABEL: memcmp_le_zero:
+; CHECK-UNALIGNED-RV64-ZBKB: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: lw a0, 0(a0)
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: lw a1, 0(a1)
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a0, a0
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a1, a1
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: srli a0, a0, 32
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: srli a1, a1, 32
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: sltu a2, a0, a1
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: sltu a0, a1, a0
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: sub a0, a0, a2
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: slti a0, a0, 1
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ret
+;
+; CHECK-UNALIGNED-RV32-V-LABEL: memcmp_le_zero:
+; CHECK-UNALIGNED-RV32-V: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-V-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-V-NEXT: slti a0, a0, 1
+; CHECK-UNALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, 16
+; CHECK-UNALIGNED-RV32-V-NEXT: ret
+;
+; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_le_zero:
+; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: slti a0, a0, 1
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
+; CHECK-UNALIGNED-RV64-V-NEXT: ret
+entry:
+ %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 4)
+ %ret = icmp slt i32 %memcmp, 1
+ ret i1 %ret
+}
+
+define i1 @memcmp_ge_zero(ptr %s1, ptr %s2) nounwind {
+; CHECK-ALIGNED-RV32-LABEL: memcmp_ge_zero:
+; CHECK-ALIGNED-RV32: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV32-NEXT: xori a0, a0, 1
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-LABEL: memcmp_ge_zero:
+; CHECK-ALIGNED-RV64: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV64-NEXT: xori a0, a0, 1
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-NEXT: ret
+;
+; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_ge_zero:
+; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: xori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_ge_zero:
+; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: xori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
+;
+; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_ge_zero:
+; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: xori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_ge_zero:
+; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: xori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
+;
+; CHECK-ALIGNED-RV32-V-LABEL: memcmp_ge_zero:
+; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-V-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV32-V-NEXT: xori a0, a0, 1
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-V-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-V-LABEL: memcmp_ge_zero:
+; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-V-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV64-V-NEXT: xori a0, a0, 1
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-V-NEXT: ret
+;
+; CHECK-UNALIGNED-RV32-LABEL: memcmp_ge_zero:
+; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV32-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-NEXT: slti a0, a0, 0
+; CHECK-UNALIGNED-RV32-NEXT: xori a0, a0, 1
+; CHECK-UNALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, 16
+; CHECK-UNALIGNED-RV32-NEXT: ret
+;
+; CHECK-UNALIGNED-RV64-LABEL: memcmp_ge_zero:
+; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: slti a0, a0, 0
+; CHECK-UNALIGNED-RV64-NEXT: xori a0, a0, 1
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
+; CHECK-UNALIGNED-RV64-NEXT: ret
+;
+; CHECK-UNALIGNED-RV32-ZBB-LABEL: memcmp_ge_zero:
+; CHECK-UNALIGNED-RV32-ZBB: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a0, 0(a0)
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: lw a1, 0(a1)
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a0, a0
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: rev8 a1, a1
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: sltu a2, a0, a1
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: sltu a0, a1, a0
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: sub a0, a0, a2
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: slti a0, a0, 0
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: xori a0, a0, 1
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: ret
+;
+; CHECK-UNALIGNED-RV64-ZBB-LABEL: memcmp_ge_zero:
+; CHECK-UNALIGNED-RV64-ZBB: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: lw a0, 0(a0)
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: lw a1, 0(a1)
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a0, a0
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: rev8 a1, a1
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: srli a0, a0, 32
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: srli a1, a1, 32
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: sltu a2, a0, a1
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: sltu a0, a1, a0
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: sub a0, a0, a2
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: slti a0, a0, 0
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: xori a0, a0, 1
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: ret
+;
+; CHECK-UNALIGNED-RV32-ZBKB-LABEL: memcmp_ge_zero:
+; CHECK-UNALIGNED-RV32-ZBKB: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a0, 0(a0)
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lw a1, 0(a1)
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a0, a0
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: rev8 a1, a1
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: sltu a2, a0, a1
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: sltu a0, a1, a0
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: sub a0, a0, a2
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: slti a0, a0, 0
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: xori a0, a0, 1
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: ret
+;
+; CHECK-UNALIGNED-RV64-ZBKB-LABEL: memcmp_ge_zero:
+; CHECK-UNALIGNED-RV64-ZBKB: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: lw a0, 0(a0)
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: lw a1, 0(a1)
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a0, a0
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: rev8 a1, a1
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: srli a0, a0, 32
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: srli a1, a1, 32
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: sltu a2, a0, a1
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: sltu a0, a1, a0
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: sub a0, a0, a2
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: slti a0, a0, 0
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: xori a0, a0, 1
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ret
+;
+; CHECK-UNALIGNED-RV32-V-LABEL: memcmp_ge_zero:
+; CHECK-UNALIGNED-RV32-V: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-V-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-V-NEXT: slti a0, a0, 0
+; CHECK-UNALIGNED-RV32-V-NEXT: xori a0, a0, 1
+; CHECK-UNALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, 16
+; CHECK-UNALIGNED-RV32-V-NEXT: ret
+;
+; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_ge_zero:
+; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: slti a0, a0, 0
+; CHECK-UNALIGNED-RV64-V-NEXT: xori a0, a0, 1
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
+; CHECK-UNALIGNED-RV64-V-NEXT: ret
+entry:
+ %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 4)
+ %ret = icmp sgt i32 %memcmp, -1
+ ret i1 %ret
+}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK-ALIGNED: {{.*}}
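For context on why the new tests compare against 1 and -1 rather than 0: instcombine canonicalizes signed comparisons with a constant by adjusting the constant, so the (icmp sle/sge X, 0) forms named in the commit title reach ExpandMemCmp in their canonical slt/sgt spelling. A minimal sketch (here %m stands for the memcmp result; the rewritten forms are what the test bodies use):

  %le = icmp sle i32 %m, 0    ; canonicalized to: icmp slt i32 %m, 1
  %ge = icmp sge i32 %m, 0    ; canonicalized to: icmp sgt i32 %m, -1
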
diff --git a/llvm/test/CodeGen/X86/memcmp.ll b/llvm/test/CodeGen/X86/memcmp.ll
index 014db331606069..e744d2a06e55f6 100644
--- a/llvm/test/CodeGen/X86/memcmp.ll
+++ b/llvm/test/CodeGen/X86/memcmp.ll
@@ -260,6 +260,44 @@ define i1 @length4_gt(ptr %X, ptr %Y) nounwind {
ret i1 %c
}
+define i1 @length4_le(ptr %X, ptr %Y) nounwind {
+; X64-LABEL: length4_le:
+; X64: # %bb.0:
+; X64-NEXT: movl (%rdi), %eax
+; X64-NEXT: movl (%rsi), %ecx
+; X64-NEXT: bswapl %eax
+; X64-NEXT: bswapl %ecx
+; X64-NEXT: cmpl %ecx, %eax
+; X64-NEXT: seta %al
+; X64-NEXT: sbbb $0, %al
+; X64-NEXT: movsbl %al, %eax
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: setle %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 4) nounwind
+ %c = icmp slt i32 %m, 1
+ ret i1 %c
+}
+
+define i1 @length4_ge(ptr %X, ptr %Y) nounwind {
+; X64-LABEL: length4_ge:
+; X64: # %bb.0:
+; X64-NEXT: movl (%rdi), %eax
+; X64-NEXT: movl (%rsi), %ecx
+; X64-NEXT: bswapl %eax
+; X64-NEXT: bswapl %ecx
+; X64-NEXT: cmpl %ecx, %eax
+; X64-NEXT: seta %al
+; X64-NEXT: sbbb $0, %al
+; X64-NEXT: movsbl %al, %eax
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: setns %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 4) nounwind
+ %c = icmp sgt i32 %m, -1
+ ret i1 %c
+}
+
define i1 @length4_eq_const(ptr %X) nounwind {
; X64-LABEL: length4_eq_const:
; X64: # %bb.0:
@@ -279,13 +317,13 @@ define i32 @length5(ptr %X, ptr %Y) nounwind {
; X64-NEXT: bswapl %ecx
; X64-NEXT: bswapl %edx
; X64-NEXT: cmpl %edx, %ecx
-; X64-NEXT: jne .LBB18_3
+; X64-NEXT: jne .LBB20_3
; X64-NEXT: # %bb.1: # %loadbb1
; X64-NEXT: movzbl 4(%rdi), %eax
; X64-NEXT: movzbl 4(%rsi), %ecx
; X64-NEXT: subl %ecx, %eax
; X64-NEXT: retq
-; X64-NEXT: .LBB18_3: # %res_block
+; X64-NEXT: .LBB20_3: # %res_block
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpl %edx, %ecx
; X64-NEXT: sbbl %eax, %eax
@@ -319,7 +357,7 @@ define i1 @length5_lt(ptr %X, ptr %Y) nounwind {
; X64-NEXT: bswapl %ecx
; X64-NEXT: bswapl %edx
; X64-NEXT: cmpl %edx, %ecx
-; X64-NEXT: jne .LBB20_3
+; X64-NEXT: jne .LBB22_3
; X64-NEXT: # %bb.1: # %loadbb1
; X64-NEXT: movzbl 4(%rdi), %eax
; X64-NEXT: movzbl 4(%rsi), %ecx
@@ -327,7 +365,7 @@ define i1 @length5_lt(ptr %X, ptr %Y) nounwind {
; X64-NEXT: shrl $31, %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
-; X64-NEXT: .LBB20_3: # %res_block
+; X64-NEXT: .LBB22_3: # %res_block
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpl %edx, %ecx
; X64-NEXT: sbbl %eax, %eax
@@ -348,7 +386,7 @@ define i32 @length7(ptr %X, ptr %Y) nounwind {
; X64-NEXT: bswapl %ecx
; X64-NEXT: bswapl %edx
; X64-NEXT: cmpl %edx, %ecx
-; X64-NEXT: jne .LBB21_2
+; X64-NEXT: jne .LBB23_2
; X64-NEXT: # %bb.1: # %loadbb1
; X64-NEXT: movl 3(%rdi), %ecx
; X64-NEXT: movl 3(%rsi), %edx
@@ -356,13 +394,13 @@ define i32 @length7(ptr %X, ptr %Y) nounwind {
; X64-NEXT: bswapl %edx
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpl %edx, %ecx
-; X64-NEXT: je .LBB21_3
-; X64-NEXT: .LBB21_2: # %res_block
+; X64-NEXT: je .LBB23_3
+; X64-NEXT: .LBB23_2: # %res_block
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpl %edx, %ecx
; X64-NEXT: sbbl %eax, %eax
; X64-NEXT: orl $1, %eax
-; X64-NEXT: .LBB21_3: # %endblock
+; X64-NEXT: .LBB23_3: # %endblock
; X64-NEXT: retq
%m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 7) nounwind
ret i32 %m
@@ -376,7 +414,7 @@ define i1 @length7_lt(ptr %X, ptr %Y) nounwind {
; X64-NEXT: bswapl %ecx
; X64-NEXT: bswapl %edx
; X64-NEXT: cmpl %edx, %ecx
-; X64-NEXT: jne .LBB22_2
+; X64-NEXT: jne .LBB24_2
; X64-NEXT: # %bb.1: # %loadbb1
; X64-NEXT: movl 3(%rdi), %ecx
; X64-NEXT: movl 3(%rsi), %edx
@@ -384,13 +422,13 @@ define i1 @length7_lt(ptr %X, ptr %Y) nounwind {
; X64-NEXT: bswapl %edx
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpl %edx, %ecx
-; X64-NEXT: je .LBB22_3
-; X64-NEXT: .LBB22_2: # %res_block
+; X64-NEXT: je .LBB24_3
+; X64-NEXT: .LBB24_2: # %res_block
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpl %edx, %ecx
; X64-NEXT: sbbl %eax, %eax
; X64-NEXT: orl $1, %eax
-; X64-NEXT: .LBB22_3: # %endblock
+; X64-NEXT: .LBB24_3: # %endblock
; X64-NEXT: shrl $31, %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
@@ -524,7 +562,7 @@ define i32 @length12(ptr %X, ptr %Y) nounwind {
; X64-NEXT: bswapq %rcx
; X64-NEXT: bswapq %rdx
; X64-NEXT: cmpq %rdx, %rcx
-; X64-NEXT: jne .LBB31_2
+; X64-NEXT: jne .LBB33_2
; X64-NEXT: # %bb.1: # %loadbb1
; X64-NEXT: movl 8(%rdi), %ecx
; X64-NEXT: movl 8(%rsi), %edx
@@ -532,13 +570,13 @@ define i32 @length12(ptr %X, ptr %Y) nounwind {
; X64-NEXT: bswapl %edx
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpq %rdx, %rcx
-; X64-NEXT: je .LBB31_3
-; X64-NEXT: .LBB31_2: # %res_block
+; X64-NEXT: je .LBB33_3
+; X64-NEXT: .LBB33_2: # %res_block
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpq %rdx, %rcx
; X64-NEXT: sbbl %eax, %eax
; X64-NEXT: orl $1, %eax
-; X64-NEXT: .LBB31_3: # %endblock
+; X64-NEXT: .LBB33_3: # %endblock
; X64-NEXT: retq
%m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 12) nounwind
ret i32 %m
@@ -582,7 +620,7 @@ define i32 @length15(ptr %X, ptr %Y) nounwind {
; X64-NEXT: bswapq %rcx
; X64-NEXT: bswapq %rdx
; X64-NEXT: cmpq %rdx, %rcx
-; X64-NEXT: jne .LBB34_2
+; X64-NEXT: jne .LBB36_2
; X64-NEXT: # %bb.1: # %loadbb1
; X64-NEXT: movq 7(%rdi), %rcx
; X64-NEXT: movq 7(%rsi), %rdx
@@ -590,13 +628,13 @@ define i32 @length15(ptr %X, ptr %Y) nounwind {
; X64-NEXT: bswapq %rdx
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpq %rdx, %rcx
-; X64-NEXT: je .LBB34_3
-; X64-NEXT: .LBB34_2: # %res_block
+; X64-NEXT: je .LBB36_3
+; X64-NEXT: .LBB36_2: # %res_block
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpq %rdx, %rcx
; X64-NEXT: sbbl %eax, %eax
; X64-NEXT: orl $1, %eax
-; X64-NEXT: .LBB34_3: # %endblock
+; X64-NEXT: .LBB36_3: # %endblock
; X64-NEXT: retq
%m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 15) nounwind
ret i32 %m
@@ -610,7 +648,7 @@ define i1 @length15_lt(ptr %X, ptr %Y) nounwind {
; X64-NEXT: bswapq %rcx
; X64-NEXT: bswapq %rdx
; X64-NEXT: cmpq %rdx, %rcx
-; X64-NEXT: jne .LBB35_2
+; X64-NEXT: jne .LBB37_2
; X64-NEXT: # %bb.1: # %loadbb1
; X64-NEXT: movq 7(%rdi), %rcx
; X64-NEXT: movq 7(%rsi), %rdx
@@ -618,13 +656,13 @@ define i1 @length15_lt(ptr %X, ptr %Y) nounwind {
; X64-NEXT: bswapq %rdx
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpq %rdx, %rcx
-; X64-NEXT: je .LBB35_3
-; X64-NEXT: .LBB35_2: # %res_block
+; X64-NEXT: je .LBB37_3
+; X64-NEXT: .LBB37_2: # %res_block
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpq %rdx, %rcx
; X64-NEXT: sbbl %eax, %eax
; X64-NEXT: orl $1, %eax
-; X64-NEXT: .LBB35_3: # %endblock
+; X64-NEXT: .LBB37_3: # %endblock
; X64-NEXT: shrl $31, %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
@@ -640,20 +678,20 @@ define i32 @length15_const(ptr %X, ptr %Y) nounwind {
; X64-NEXT: movq (%rdi), %rdx
; X64-NEXT: bswapq %rdx
; X64-NEXT: cmpq %rcx, %rdx
-; X64-NEXT: jne .LBB36_2
+; X64-NEXT: jne .LBB38_2
; X64-NEXT: # %bb.1: # %loadbb1
; X64-NEXT: movabsq $4051322327650219061, %rcx # imm = 0x3839303132333435
; X64-NEXT: movq 7(%rdi), %rdx
; X64-NEXT: bswapq %rdx
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpq %rcx, %rdx
-; X64-NEXT: je .LBB36_3
-; X64-NEXT: .LBB36_2: # %res_block
+; X64-NEXT: je .LBB38_3
+; X64-NEXT: .LBB38_2: # %res_block
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpq %rcx, %rdx
; X64-NEXT: sbbl %eax, %eax
; X64-NEXT: orl $1, %eax
-; X64-NEXT: .LBB36_3: # %endblock
+; X64-NEXT: .LBB38_3: # %endblock
; X64-NEXT: retq
%m = tail call i32 @memcmp(ptr %X, ptr getelementptr inbounds ([513 x i8], ptr @.str, i32 0, i32 1), i64 15) nounwind
ret i32 %m
@@ -681,20 +719,20 @@ define i1 @length15_gt_const(ptr %X, ptr %Y) nounwind {
; X64-NEXT: movq (%rdi), %rcx
; X64-NEXT: bswapq %rcx
; X64-NEXT: cmpq %rax, %rcx
-; X64-NEXT: jne .LBB38_2
+; X64-NEXT: jne .LBB40_2
; X64-NEXT: # %bb.1: # %loadbb1
; X64-NEXT: movabsq $4051322327650219061, %rax # imm = 0x3839303132333435
; X64-NEXT: movq 7(%rdi), %rcx
; X64-NEXT: bswapq %rcx
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: cmpq %rax, %rcx
-; X64-NEXT: je .LBB38_3
-; X64-NEXT: .LBB38_2: # %res_block
+; X64-NEXT: je .LBB40_3
+; X64-NEXT: .LBB40_2: # %res_block
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: cmpq %rax, %rcx
; X64-NEXT: sbbl %edx, %edx
; X64-NEXT: orl $1, %edx
-; X64-NEXT: .LBB38_3: # %endblock
+; X64-NEXT: .LBB40_3: # %endblock
; X64-NEXT: testl %edx, %edx
; X64-NEXT: setg %al
; X64-NEXT: retq
@@ -713,7 +751,7 @@ define i32 @length16(ptr %X, ptr %Y) nounwind {
; X64-NEXT: bswapq %rcx
; X64-NEXT: bswapq %rdx
; X64-NEXT: cmpq %rdx, %rcx
-; X64-NEXT: jne .LBB39_2
+; X64-NEXT: jne .LBB41_2
; X64-NEXT: # %bb.1: # %loadbb1
; X64-NEXT: movq 8(%rdi), %rcx
; X64-NEXT: movq 8(%rsi), %rdx
@@ -721,13 +759,13 @@ define i32 @length16(ptr %X, ptr %Y) nounwind {
; X64-NEXT: bswapq %rdx
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpq %rdx, %rcx
-; X64-NEXT: je .LBB39_3
-; X64-NEXT: .LBB39_2: # %res_block
+; X64-NEXT: je .LBB41_3
+; X64-NEXT: .LBB41_2: # %res_block
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpq %rdx, %rcx
; X64-NEXT: sbbl %eax, %eax
; X64-NEXT: orl $1, %eax
-; X64-NEXT: .LBB39_3: # %endblock
+; X64-NEXT: .LBB41_3: # %endblock
; X64-NEXT: retq
%m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 16) nounwind
ret i32 %m
@@ -783,7 +821,7 @@ define i1 @length16_lt(ptr %x, ptr %y) nounwind {
; X64-NEXT: bswapq %rcx
; X64-NEXT: bswapq %rdx
; X64-NEXT: cmpq %rdx, %rcx
-; X64-NEXT: jne .LBB41_2
+; X64-NEXT: jne .LBB43_2
; X64-NEXT: # %bb.1: # %loadbb1
; X64-NEXT: movq 8(%rdi), %rcx
; X64-NEXT: movq 8(%rsi), %rdx
@@ -791,13 +829,13 @@ define i1 @length16_lt(ptr %x, ptr %y) nounwind {
; X64-NEXT: bswapq %rdx
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpq %rdx, %rcx
-; X64-NEXT: je .LBB41_3
-; X64-NEXT: .LBB41_2: # %res_block
+; X64-NEXT: je .LBB43_3
+; X64-NEXT: .LBB43_2: # %res_block
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpq %rdx, %rcx
; X64-NEXT: sbbl %eax, %eax
; X64-NEXT: orl $1, %eax
-; X64-NEXT: .LBB41_3: # %endblock
+; X64-NEXT: .LBB43_3: # %endblock
; X64-NEXT: shrl $31, %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
@@ -814,7 +852,7 @@ define i1 @length16_gt(ptr %x, ptr %y) nounwind {
; X64-NEXT: bswapq %rax
; X64-NEXT: bswapq %rcx
; X64-NEXT: cmpq %rcx, %rax
-; X64-NEXT: jne .LBB42_2
+; X64-NEXT: jne .LBB44_2
; X64-NEXT: # %bb.1: # %loadbb1
; X64-NEXT: movq 8(%rdi), %rax
; X64-NEXT: movq 8(%rsi), %rcx
@@ -822,13 +860,13 @@ define i1 @length16_gt(ptr %x, ptr %y) nounwind {
; X64-NEXT: bswapq %rcx
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: cmpq %rcx, %rax
-; X64-NEXT: je .LBB42_3
-; X64-NEXT: .LBB42_2: # %res_block
+; X64-NEXT: je .LBB44_3
+; X64-NEXT: .LBB44_2: # %res_block
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: cmpq %rcx, %rax
; X64-NEXT: sbbl %edx, %edx
; X64-NEXT: orl $1, %edx
-; X64-NEXT: .LBB42_3: # %endblock
+; X64-NEXT: .LBB44_3: # %endblock
; X64-NEXT: testl %edx, %edx
; X64-NEXT: setg %al
; X64-NEXT: retq
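The X64 output above shows the pattern these pre-commit tests capture: the seta/sbbb pair materializes the full -1/0/+1 memcmp result before the final sign test (setle/setns). A rough IR-level sketch of the length4_le expansion, assuming the usual byte-swapped-load lowering (this is an illustration, not the exact output of ExpandMemCmp):

  define i1 @length4_le_sketch(ptr %X, ptr %Y) {
    %lx = load i32, ptr %X, align 1
    %ly = load i32, ptr %Y, align 1
    %sx = call i32 @llvm.bswap.i32(i32 %lx)   ; big-endian order for lexicographic compare
    %sy = call i32 @llvm.bswap.i32(i32 %ly)
    %gt = icmp ugt i32 %sx, %sy
    %lt = icmp ult i32 %sx, %sy
    %zgt = zext i1 %gt to i32
    %zlt = zext i1 %lt to i32
    %m = sub i32 %zgt, %zlt                   ; -1, 0, or +1, as memcmp would return
    %c = icmp slt i32 %m, 1                   ; canonical form of (%m <= 0)
    ret i1 %c
  }
  declare i32 @llvm.bswap.i32(i32)

Recognizing the canonical slt-1/sgt-neg-1 comparisons would let the expansion skip materializing %m and test the carry/overflow of the unsigned compare directly, which is what the follow-up change is expected to exercise against these baselines.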