[llvm] [LoongArch] Initial implementation for `enableMemCmpExpansion` hook (PR #166526)

via llvm-commits llvm-commits at lists.llvm.org
Wed Nov 5 22:56:32 PST 2025


https://github.com/zhaoqi5 updated https://github.com/llvm/llvm-project/pull/166526

From dfa2932b152028ac48e746c3026d9c01b97f01cb Mon Sep 17 00:00:00 2001
From: Qi Zhao <zhaoqi01 at loongson.cn>
Date: Thu, 6 Nov 2025 14:39:39 +0800
Subject: [PATCH 1/3] [LoongArch][NFC] Pre-commit tests for memcmp expansion

Same test cases as RISC-V.
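
For context, the hook these tests exercise is `TargetTransformInfo::enableMemCmpExpansion`,
which a target overrides so the ExpandMemCmp pass can turn small fixed-size
`memcmp`/`bcmp` calls into inline loads and compares instead of libcalls. The
sketch below shows the general shape such an override takes, assuming the
LoongArch implementation mirrors the RISC-V one; the `hasUAL()`/`is64Bit()`
subtarget queries, the chosen load sizes, and the gating on unaligned access
are illustrative and may differ from the actual patch in this series.

// Sketch only, modeled on RISCVTTIImpl::enableMemCmpExpansion; the real
// LoongArch change may differ.
// Would live in llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.cpp.
TargetTransformInfo::MemCmpExpansionOptions
LoongArchTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TargetTransformInfo::MemCmpExpansionOptions Options;
  // Only expand when unaligned scalar accesses are legal (+ual), so the
  // inline loads are not split into byte loads; hasUAL() is assumed to be
  // the subtarget query for the +ual feature.
  if (!ST->hasUAL())
    return Options;
  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
  Options.NumLoadsPerBlock = Options.MaxNumLoads;
  // Prefer GRLen-wide loads first, then narrower ones for the tail.
  if (ST->is64Bit())
    Options.LoadSizes = {8, 4, 2, 1};
  else
    Options.LoadSizes = {4, 2, 1};
  // Overlapping loads let e.g. a 7-byte compare use two 4-byte loads.
  Options.AllowOverlappingLoads = true;
  return Options;
}

With such an override in place, the small fixed-size cases below would be
expanded inline rather than calling bcmp/memcmp, which is what the follow-up
patch in this series is expected to show in the regenerated checks.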
---
 .../CodeGen/LoongArch/expandmemcmp-optsize.ll | 1147 +++++++++++++++
 llvm/test/CodeGen/LoongArch/expandmemcmp.ll   | 1227 +++++++++++++++++
 2 files changed, 2374 insertions(+)
 create mode 100644 llvm/test/CodeGen/LoongArch/expandmemcmp-optsize.ll
 create mode 100644 llvm/test/CodeGen/LoongArch/expandmemcmp.ll

diff --git a/llvm/test/CodeGen/LoongArch/expandmemcmp-optsize.ll b/llvm/test/CodeGen/LoongArch/expandmemcmp-optsize.ll
new file mode 100644
index 0000000000000..82fe899bb795b
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/expandmemcmp-optsize.ll
@@ -0,0 +1,1147 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: sed 's/iGRLen/i32/g' %s | llc --mtriple=loongarch32 --mattr=+ual \
+; RUN:   | FileCheck %s --check-prefixes=CHECK,LA32,LA32-UAL
+; RUN: sed 's/iGRLen/i64/g' %s | llc --mtriple=loongarch64 --mattr=+ual \
+; RUN:   | FileCheck %s --check-prefixes=CHECK,LA64,LA64-UAL
+; RUN: sed 's/iGRLen/i32/g' %s | llc --mtriple=loongarch32 --mattr=-ual \
+; RUN:   | FileCheck %s --check-prefixes=CHECK,LA32,LA32-NUAL
+; RUN: sed 's/iGRLen/i64/g' %s | llc --mtriple=loongarch64 --mattr=-ual \
+; RUN:   | FileCheck %s --check-prefixes=CHECK,LA64,LA64-NUAL
+
+declare i32 @bcmp(ptr, ptr, iGRLen) nounwind readonly
+declare i32 @memcmp(ptr, ptr, iGRLen) nounwind readonly
+
+define i32 @bcmp_size_0(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: bcmp_size_0:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    move $a2, $zero
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_0:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    move $a2, $zero
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 0)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_1(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: bcmp_size_1:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 1
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_1:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 1
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 1)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_2(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: bcmp_size_2:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 2
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_2:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 2
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 2)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_3(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: bcmp_size_3:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 3
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_3:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 3
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 3)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_4(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: bcmp_size_4:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 4
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_4:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 4
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 4)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_5(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: bcmp_size_5:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 5
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_5:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 5
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 5)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_6(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: bcmp_size_6:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 6
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_6:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 6
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 6)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_7(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: bcmp_size_7:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 7
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_7:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 7
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 7)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_8(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: bcmp_size_8:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 8
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_8:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 8
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 8)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_15(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: bcmp_size_15:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 15
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_15:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 15
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 15)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_16(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: bcmp_size_16:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 16
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_16:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 16
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 16)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_31(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: bcmp_size_31:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 31
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_31:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 31
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 31)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_32(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: bcmp_size_32:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 32
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_32:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 32
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 32)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_63(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: bcmp_size_63:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 63
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_63:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 63
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 63)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_64(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: bcmp_size_64:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 64
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_64:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 64
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 64)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_127(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: bcmp_size_127:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 127
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_127:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 127
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 127)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_128(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: bcmp_size_128:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 128
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_128:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 128
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 128)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_runtime(ptr %s1, ptr %s2, iGRLen %len) nounwind optsize {
+; LA32-LABEL: bcmp_size_runtime:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_runtime:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen %len)
+  ret i32 %bcmp
+}
+
+define i1 @bcmp_eq_zero(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: bcmp_eq_zero:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 4
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    sltui $a0, $a0, 1
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_eq_zero:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 4
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    sltui $a0, $a0, 1
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 4)
+  %ret = icmp eq i32 %bcmp, 0
+  ret i1 %ret
+}
+
+define i1 @bcmp_lt_zero(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: bcmp_lt_zero:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 4
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    srli.w $a0, $a0, 31
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_lt_zero:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 4
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    slti $a0, $a0, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 4)
+  %ret = icmp slt i32 %bcmp, 0
+  ret i1 %ret
+}
+
+define i1 @bcmp_gt_zero(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: bcmp_gt_zero:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 4
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    slt $a0, $zero, $a0
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_gt_zero:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 4
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    slt $a0, $zero, $a0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 4)
+  %ret = icmp sgt i32 %bcmp, 0
+  ret i1 %ret
+}
+
+define i32 @memcmp_size_0(ptr %s1, ptr %s2) nounwind optsize {
+; CHECK-LABEL: memcmp_size_0:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    move $a0, $zero
+; CHECK-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 0)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_1(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: memcmp_size_1:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 1
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_1:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 1
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 1)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_2(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: memcmp_size_2:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 2
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_2:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 2
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 2)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_3(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: memcmp_size_3:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 3
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_3:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 3
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 3)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_4(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: memcmp_size_4:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 4
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_4:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 4
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 4)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_5(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: memcmp_size_5:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 5
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_5:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 5
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 5)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_6(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: memcmp_size_6:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 6
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_6:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 6
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 6)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_7(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: memcmp_size_7:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 7
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_7:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 7
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 7)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_8(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: memcmp_size_8:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 8
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_8:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 8
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 8)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_15(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: memcmp_size_15:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 15
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_15:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 15
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 15)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_16(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: memcmp_size_16:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 16
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_16:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 16
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 16)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_31(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: memcmp_size_31:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 31
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_31:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 31
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 31)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_32(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: memcmp_size_32:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 32
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_32:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 32
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 32)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_63(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: memcmp_size_63:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 63
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_63:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 63
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 63)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_64(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: memcmp_size_64:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 64
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_64:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 64
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 64)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_127(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: memcmp_size_127:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 127
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_127:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 127
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 127)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_128(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: memcmp_size_128:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 128
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_128:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 128
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 128)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_runtime(ptr %s1, ptr %s2, iGRLen %len) nounwind optsize {
+; LA32-LABEL: memcmp_size_runtime:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_runtime:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen %len)
+  ret i32 %memcmp
+}
+
+define i1 @memcmp_eq_zero(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-UAL-LABEL: memcmp_eq_zero:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 0
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA32-UAL-NEXT:    xor $a0, $a0, $a1
+; LA32-UAL-NEXT:    sltui $a0, $a0, 1
+; LA32-UAL-NEXT:    ret
+;
+; LA64-UAL-LABEL: memcmp_eq_zero:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.w $a1, $a1, 0
+; LA64-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    sltui $a0, $a0, 1
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: memcmp_eq_zero:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    ld.bu $a2, $a1, 1
+; LA32-NUAL-NEXT:    ld.bu $a3, $a1, 0
+; LA32-NUAL-NEXT:    ld.bu $a4, $a1, 2
+; LA32-NUAL-NEXT:    ld.bu $a1, $a1, 3
+; LA32-NUAL-NEXT:    slli.w $a2, $a2, 8
+; LA32-NUAL-NEXT:    or $a2, $a2, $a3
+; LA32-NUAL-NEXT:    slli.w $a3, $a4, 16
+; LA32-NUAL-NEXT:    slli.w $a1, $a1, 24
+; LA32-NUAL-NEXT:    or $a1, $a1, $a3
+; LA32-NUAL-NEXT:    or $a1, $a1, $a2
+; LA32-NUAL-NEXT:    ld.bu $a2, $a0, 1
+; LA32-NUAL-NEXT:    ld.bu $a3, $a0, 0
+; LA32-NUAL-NEXT:    ld.bu $a4, $a0, 2
+; LA32-NUAL-NEXT:    ld.bu $a0, $a0, 3
+; LA32-NUAL-NEXT:    slli.w $a2, $a2, 8
+; LA32-NUAL-NEXT:    or $a2, $a2, $a3
+; LA32-NUAL-NEXT:    slli.w $a3, $a4, 16
+; LA32-NUAL-NEXT:    slli.w $a0, $a0, 24
+; LA32-NUAL-NEXT:    or $a0, $a0, $a3
+; LA32-NUAL-NEXT:    or $a0, $a0, $a2
+; LA32-NUAL-NEXT:    xor $a0, $a0, $a1
+; LA32-NUAL-NEXT:    sltui $a0, $a0, 1
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_eq_zero:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    ld.bu $a2, $a1, 1
+; LA64-NUAL-NEXT:    ld.bu $a3, $a1, 0
+; LA64-NUAL-NEXT:    ld.bu $a4, $a1, 2
+; LA64-NUAL-NEXT:    ld.b $a1, $a1, 3
+; LA64-NUAL-NEXT:    slli.d $a2, $a2, 8
+; LA64-NUAL-NEXT:    or $a2, $a2, $a3
+; LA64-NUAL-NEXT:    slli.d $a3, $a4, 16
+; LA64-NUAL-NEXT:    slli.d $a1, $a1, 24
+; LA64-NUAL-NEXT:    or $a1, $a1, $a3
+; LA64-NUAL-NEXT:    or $a1, $a1, $a2
+; LA64-NUAL-NEXT:    ld.bu $a2, $a0, 1
+; LA64-NUAL-NEXT:    ld.bu $a3, $a0, 0
+; LA64-NUAL-NEXT:    ld.bu $a4, $a0, 2
+; LA64-NUAL-NEXT:    ld.b $a0, $a0, 3
+; LA64-NUAL-NEXT:    slli.d $a2, $a2, 8
+; LA64-NUAL-NEXT:    or $a2, $a2, $a3
+; LA64-NUAL-NEXT:    slli.d $a3, $a4, 16
+; LA64-NUAL-NEXT:    slli.d $a0, $a0, 24
+; LA64-NUAL-NEXT:    or $a0, $a0, $a3
+; LA64-NUAL-NEXT:    or $a0, $a0, $a2
+; LA64-NUAL-NEXT:    xor $a0, $a0, $a1
+; LA64-NUAL-NEXT:    sltui $a0, $a0, 1
+; LA64-NUAL-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 4)
+  %ret = icmp eq i32 %memcmp, 0
+  ret i1 %ret
+}
+
+define i1 @memcmp_lt_zero(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: memcmp_lt_zero:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 4
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    srli.w $a0, $a0, 31
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_lt_zero:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 4
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    slti $a0, $a0, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 4)
+  %ret = icmp slt i32 %memcmp, 0
+  ret i1 %ret
+}
+
+define i1 @memcmp_gt_zero(ptr %s1, ptr %s2) nounwind optsize {
+; LA32-LABEL: memcmp_gt_zero:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 4
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    slt $a0, $zero, $a0
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_gt_zero:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 4
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    slt $a0, $zero, $a0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 4)
+  %ret = icmp sgt i32 %memcmp, 0
+  ret i1 %ret
+}
diff --git a/llvm/test/CodeGen/LoongArch/expandmemcmp.ll b/llvm/test/CodeGen/LoongArch/expandmemcmp.ll
new file mode 100644
index 0000000000000..407ff1995cf8e
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/expandmemcmp.ll
@@ -0,0 +1,1227 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: sed 's/iGRLen/i32/g' %s | llc --mtriple=loongarch32 --mattr=+ual \
+; RUN:   | FileCheck %s --check-prefixes=CHECK,LA32,LA32-UAL
+; RUN: sed 's/iGRLen/i64/g' %s | llc --mtriple=loongarch64 --mattr=+ual \
+; RUN:   | FileCheck %s --check-prefixes=CHECK,LA64,LA64-UAL
+; RUN: sed 's/iGRLen/i32/g' %s | llc --mtriple=loongarch32 --mattr=-ual \
+; RUN:   | FileCheck %s --check-prefixes=CHECK,LA32,LA32-NUAL
+; RUN: sed 's/iGRLen/i64/g' %s | llc --mtriple=loongarch64 --mattr=-ual \
+; RUN:   | FileCheck %s --check-prefixes=CHECK,LA64,LA64-NUAL
+
+declare i32 @bcmp(ptr, ptr, iGRLen) nounwind readonly
+declare i32 @memcmp(ptr, ptr, iGRLen) nounwind readonly
+
+define i32 @bcmp_size_0(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: bcmp_size_0:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    move $a2, $zero
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_0:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    move $a2, $zero
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 0)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_1(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: bcmp_size_1:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 1
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_1:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 1
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 1)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_2(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: bcmp_size_2:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 2
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_2:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 2
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 2)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_3(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: bcmp_size_3:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 3
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_3:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 3
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 3)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_4(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: bcmp_size_4:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 4
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_4:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 4
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 4)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_5(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: bcmp_size_5:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 5
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_5:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 5
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 5)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_6(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: bcmp_size_6:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 6
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_6:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 6
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 6)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_7(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: bcmp_size_7:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 7
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_7:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 7
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 7)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_8(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: bcmp_size_8:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 8
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_8:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 8
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 8)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_15(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: bcmp_size_15:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 15
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_15:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 15
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 15)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_16(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: bcmp_size_16:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 16
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_16:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 16
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 16)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_31(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: bcmp_size_31:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 31
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_31:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 31
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 31)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_32(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: bcmp_size_32:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 32
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_32:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 32
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 32)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_63(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: bcmp_size_63:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 63
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_63:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 63
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 63)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_64(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: bcmp_size_64:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 64
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_64:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 64
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 64)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_127(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: bcmp_size_127:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 127
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_127:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 127
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 127)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_128(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: bcmp_size_128:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 128
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_128:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 128
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 128)
+  ret i32 %bcmp
+}
+
+define i32 @bcmp_size_runtime(ptr %s1, ptr %s2, iGRLen %len) nounwind {
+; LA32-LABEL: bcmp_size_runtime:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_size_runtime:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen %len)
+  ret i32 %bcmp
+}
+
+define i1 @bcmp_eq_zero(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: bcmp_eq_zero:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 16
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    sltui $a0, $a0, 1
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_eq_zero:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 16
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    sltui $a0, $a0, 1
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 16)
+  %ret = icmp eq i32 %bcmp, 0
+  ret i1 %ret
+}
+
+define i1 @bcmp_lt_zero(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: bcmp_lt_zero:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 4
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    srli.w $a0, $a0, 31
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_lt_zero:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 4
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    slti $a0, $a0, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 4)
+  %ret = icmp slt i32 %bcmp, 0
+  ret i1 %ret
+}
+
+define i1 @bcmp_gt_zero(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: bcmp_gt_zero:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 4
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    slt $a0, $zero, $a0
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_gt_zero:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 4
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    slt $a0, $zero, $a0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 4)
+  %ret = icmp sgt i32 %bcmp, 0
+  ret i1 %ret
+}
+
+define i1 @bcmp_le_zero(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: bcmp_le_zero:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 4
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    slti $a0, $a0, 1
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_le_zero:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 4
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    slti $a0, $a0, 1
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 4)
+  %ret = icmp slt i32 %bcmp, 1
+  ret i1 %ret
+}
+
+define i1 @bcmp_ge_zero(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: bcmp_ge_zero:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 4
+; LA32-NEXT:    bl bcmp
+; LA32-NEXT:    addi.w $a1, $zero, -1
+; LA32-NEXT:    slt $a0, $a1, $a0
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bcmp_ge_zero:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 4
+; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    addi.w $a1, $zero, -1
+; LA64-NEXT:    slt $a0, $a1, $a0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 4)
+  %ret = icmp sgt i32 %bcmp, -1
+  ret i1 %ret
+}
+
+define i32 @memcmp_size_0(ptr %s1, ptr %s2) nounwind {
+; CHECK-LABEL: memcmp_size_0:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    move $a0, $zero
+; CHECK-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 0)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_1(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: memcmp_size_1:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 1
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_1:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 1
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 1)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_2(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: memcmp_size_2:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 2
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_2:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 2
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 2)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_3(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: memcmp_size_3:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 3
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_3:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 3
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 3)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_4(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: memcmp_size_4:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 4
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_4:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 4
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 4)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_5(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: memcmp_size_5:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 5
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_5:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 5
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 5)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_6(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: memcmp_size_6:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 6
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_6:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 6
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 6)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_7(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: memcmp_size_7:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 7
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_7:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 7
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 7)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_8(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: memcmp_size_8:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 8
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_8:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 8
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 8)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_15(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: memcmp_size_15:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 15
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_15:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 15
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 15)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_16(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: memcmp_size_16:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 16
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_16:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 16
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 16)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_31(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: memcmp_size_31:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 31
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_31:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 31
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 31)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_32(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: memcmp_size_32:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 32
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_32:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 32
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 32)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_63(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: memcmp_size_63:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 63
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_63:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 63
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 63)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_64(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: memcmp_size_64:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 64
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_64:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 64
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 64)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_127(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: memcmp_size_127:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 127
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_127:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 127
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 127)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_128(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: memcmp_size_128:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 128
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_128:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 128
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 128)
+  ret i32 %memcmp
+}
+
+define i32 @memcmp_size_runtime(ptr %s1, ptr %s2, iGRLen %len) nounwind {
+; LA32-LABEL: memcmp_size_runtime:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_size_runtime:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen %len)
+  ret i32 %memcmp
+}
+
+define i1 @memcmp_eq_zero(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: memcmp_eq_zero:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 16
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    sltui $a0, $a0, 1
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_eq_zero:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 16
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    sltui $a0, $a0, 1
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 16)
+  %ret = icmp eq i32 %memcmp, 0
+  ret i1 %ret
+}
+
+define i1 @memcmp_lt_zero(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: memcmp_lt_zero:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 4
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    srli.w $a0, $a0, 31
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_lt_zero:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 4
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    slti $a0, $a0, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 4)
+  %ret = icmp slt i32 %memcmp, 0
+  ret i1 %ret
+}
+
+define i1 @memcmp_gt_zero(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: memcmp_gt_zero:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 4
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    slt $a0, $zero, $a0
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_gt_zero:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 4
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    slt $a0, $zero, $a0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 4)
+  %ret = icmp sgt i32 %memcmp, 0
+  ret i1 %ret
+}
+
+define i1 @memcmp_le_zero(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: memcmp_le_zero:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 4
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    slti $a0, $a0, 1
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_le_zero:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 4
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    slti $a0, $a0, 1
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 4)
+  %ret = icmp slt i32 %memcmp, 1
+  ret i1 %ret
+}
+
+define i1 @memcmp_ge_zero(ptr %s1, ptr %s2) nounwind {
+; LA32-LABEL: memcmp_ge_zero:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ori $a2, $zero, 4
+; LA32-NEXT:    bl memcmp
+; LA32-NEXT:    addi.w $a1, $zero, -1
+; LA32-NEXT:    slt $a0, $a1, $a0
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: memcmp_ge_zero:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ori $a2, $zero, 4
+; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    addi.w $a1, $zero, -1
+; LA64-NEXT:    slt $a0, $a1, $a0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+entry:
+  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 4)
+  %ret = icmp sgt i32 %memcmp, -1
+  ret i1 %ret
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; LA32-NUAL: {{.*}}
+; LA32-UAL: {{.*}}
+; LA64-NUAL: {{.*}}
+; LA64-UAL: {{.*}}

>From 9d39a76c7326c3a5d8c3c366da9c20e7dc070461 Mon Sep 17 00:00:00 2001
From: Qi Zhao <zhaoqi01 at loongson.cn>
Date: Wed, 5 Nov 2025 18:01:05 +0800
Subject: [PATCH 2/3] [LoongArch] Initial implementation for
 `enableMemCmpExpansion` hook

This commit overrides `TargetTransformInfo::enableMemCmpExpansion`,
which enables the `MergeICmps` and `ExpandMemCmp` passes on
LoongArch.
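
For illustration, a minimal sketch of the IR-level rewrite that
ExpandMemCmp can perform for a small, fixed-size, zero-compared
memcmp once this hook reports usable load sizes. This is not actual
pass output; the function name `is_eq4`, the 4-byte size, and the
single-block shape are chosen only for the example (larger sizes are
split across several compare blocks):

  define i1 @is_eq4(ptr %s1, ptr %s2) {
  entry:
    ; Before expansion this would be:
    ;   %cmp = call i32 @memcmp(ptr %s1, ptr %s2, i64 4)
    ;   %eq = icmp eq i32 %cmp, 0
    ; With unaligned access available, the call is replaced by direct loads:
    %lhs = load i32, ptr %s1, align 1
    %rhs = load i32, ptr %s2, align 1
    %eq = icmp eq i32 %lhs, %rhs
    ret i1 %eq
  }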
---
 .../LoongArchTargetTransformInfo.cpp          | 21 ++++++++++++++++++-
 .../LoongArch/LoongArchTargetTransformInfo.h  |  3 ++-
 2 files changed, 22 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.cpp b/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.cpp
index f548a8dd0532b..1eaac7348ce24 100644
--- a/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.cpp
@@ -111,4 +111,23 @@ bool LoongArchTTIImpl::shouldExpandReduction(const IntrinsicInst *II) const {
   }
 }
 
-// TODO: Implement more hooks to provide TTI machinery for LoongArch.
+LoongArchTTIImpl::TTI::MemCmpExpansionOptions
+LoongArchTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
+  TTI::MemCmpExpansionOptions Options;
+
+  if (!ST->hasUAL())
+    return Options;
+
+  // TODO: MaxNumLoads follows the target's default MaxLoadsPerMemcmp or
+  // MaxLoadsPerMemcmpOptSize value; this limit may need further tuning.
+  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
+  Options.NumLoadsPerBlock = Options.MaxNumLoads;
+  Options.AllowOverlappingLoads = true;
+
+  // TODO: Support for vectors.
+  if (ST->is64Bit())
+    Options.LoadSizes.push_back(8);
+  Options.LoadSizes.append({4, 2, 1});
+
+  return Options;
+}
diff --git a/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.h b/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.h
index e3f16c7804994..9b479f9dc0dc5 100644
--- a/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.h
+++ b/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.h
@@ -55,7 +55,8 @@ class LoongArchTTIImpl : public BasicTTIImplBase<LoongArchTTIImpl> {
 
   bool shouldExpandReduction(const IntrinsicInst *II) const override;
 
-  // TODO: Implement more hooks to provide TTI machinery for LoongArch.
+  TTI::MemCmpExpansionOptions
+  enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const override;
 };
 
 } // end namespace llvm

>From 0c426226c9b317ad3ca1ae9e86c3435dd3bef552 Mon Sep 17 00:00:00 2001
From: Qi Zhao <zhaoqi01 at loongson.cn>
Date: Thu, 6 Nov 2025 14:53:46 +0800
Subject: [PATCH 3/3] update tests

---
 .../CodeGen/LoongArch/expandmemcmp-optsize.ll | 2164 ++++++++---
 llvm/test/CodeGen/LoongArch/expandmemcmp.ll   | 3327 +++++++++++++----
 llvm/test/CodeGen/LoongArch/memcmp.ll         |   27 +-
 3 files changed, 4267 insertions(+), 1251 deletions(-)

diff --git a/llvm/test/CodeGen/LoongArch/expandmemcmp-optsize.ll b/llvm/test/CodeGen/LoongArch/expandmemcmp-optsize.ll
index 82fe899bb795b..bb81759487afd 100644
--- a/llvm/test/CodeGen/LoongArch/expandmemcmp-optsize.ll
+++ b/llvm/test/CodeGen/LoongArch/expandmemcmp-optsize.ll
@@ -38,260 +38,488 @@ entry:
 }
 
 define i32 @bcmp_size_1(ptr %s1, ptr %s2) nounwind optsize {
-; LA32-LABEL: bcmp_size_1:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 1
-; LA32-NEXT:    bl bcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: bcmp_size_1:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.bu $a0, $a0, 0
+; LA32-UAL-NEXT:    ld.bu $a1, $a1, 0
+; LA32-UAL-NEXT:    xor $a0, $a0, $a1
+; LA32-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_size_1:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 1
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_size_1:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.bu $a0, $a0, 0
+; LA64-UAL-NEXT:    ld.bu $a1, $a1, 0
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: bcmp_size_1:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 1
+; LA32-NUAL-NEXT:    bl bcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_size_1:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 1
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 1)
   ret i32 %bcmp
 }
 
 define i32 @bcmp_size_2(ptr %s1, ptr %s2) nounwind optsize {
-; LA32-LABEL: bcmp_size_2:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 2
-; LA32-NEXT:    bl bcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: bcmp_size_2:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.hu $a0, $a0, 0
+; LA32-UAL-NEXT:    ld.hu $a1, $a1, 0
+; LA32-UAL-NEXT:    xor $a0, $a0, $a1
+; LA32-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_size_2:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 2
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_size_2:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.hu $a0, $a0, 0
+; LA64-UAL-NEXT:    ld.hu $a1, $a1, 0
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: bcmp_size_2:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 2
+; LA32-NUAL-NEXT:    bl bcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_size_2:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 2
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 2)
   ret i32 %bcmp
 }
 
 define i32 @bcmp_size_3(ptr %s1, ptr %s2) nounwind optsize {
-; LA32-LABEL: bcmp_size_3:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 3
-; LA32-NEXT:    bl bcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: bcmp_size_3:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.hu $a2, $a0, 0
+; LA32-UAL-NEXT:    ld.hu $a3, $a1, 0
+; LA32-UAL-NEXT:    ld.bu $a0, $a0, 2
+; LA32-UAL-NEXT:    ld.bu $a1, $a1, 2
+; LA32-UAL-NEXT:    xor $a2, $a2, $a3
+; LA32-UAL-NEXT:    xor $a0, $a0, $a1
+; LA32-UAL-NEXT:    or $a0, $a2, $a0
+; LA32-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_size_3:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 3
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_size_3:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.hu $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.hu $a3, $a1, 0
+; LA64-UAL-NEXT:    ld.bu $a0, $a0, 2
+; LA64-UAL-NEXT:    ld.bu $a1, $a1, 2
+; LA64-UAL-NEXT:    xor $a2, $a2, $a3
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    or $a0, $a2, $a0
+; LA64-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: bcmp_size_3:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 3
+; LA32-NUAL-NEXT:    bl bcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_size_3:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 3
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 3)
   ret i32 %bcmp
 }
 
 define i32 @bcmp_size_4(ptr %s1, ptr %s2) nounwind optsize {
-; LA32-LABEL: bcmp_size_4:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 4
-; LA32-NEXT:    bl bcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: bcmp_size_4:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 0
+; LA32-UAL-NEXT:    xor $a0, $a0, $a1
+; LA32-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_size_4:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 4
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_size_4:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA64-UAL-NEXT:    ld.w $a1, $a1, 0
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: bcmp_size_4:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 4
+; LA32-NUAL-NEXT:    bl bcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_size_4:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 4
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 4)
   ret i32 %bcmp
 }
 
 define i32 @bcmp_size_5(ptr %s1, ptr %s2) nounwind optsize {
-; LA32-LABEL: bcmp_size_5:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 5
-; LA32-NEXT:    bl bcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: bcmp_size_5:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a2, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a3, $a1, 0
+; LA32-UAL-NEXT:    ld.bu $a0, $a0, 4
+; LA32-UAL-NEXT:    ld.bu $a1, $a1, 4
+; LA32-UAL-NEXT:    xor $a2, $a2, $a3
+; LA32-UAL-NEXT:    xor $a0, $a0, $a1
+; LA32-UAL-NEXT:    or $a0, $a2, $a0
+; LA32-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_size_5:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 5
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_size_5:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.w $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.w $a3, $a1, 0
+; LA64-UAL-NEXT:    ld.bu $a0, $a0, 4
+; LA64-UAL-NEXT:    ld.bu $a1, $a1, 4
+; LA64-UAL-NEXT:    xor $a2, $a2, $a3
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    or $a0, $a2, $a0
+; LA64-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: bcmp_size_5:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 5
+; LA32-NUAL-NEXT:    bl bcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_size_5:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 5
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 5)
   ret i32 %bcmp
 }
 
 define i32 @bcmp_size_6(ptr %s1, ptr %s2) nounwind optsize {
-; LA32-LABEL: bcmp_size_6:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 6
-; LA32-NEXT:    bl bcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: bcmp_size_6:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a2, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a3, $a1, 0
+; LA32-UAL-NEXT:    ld.hu $a0, $a0, 4
+; LA32-UAL-NEXT:    ld.hu $a1, $a1, 4
+; LA32-UAL-NEXT:    xor $a2, $a2, $a3
+; LA32-UAL-NEXT:    xor $a0, $a0, $a1
+; LA32-UAL-NEXT:    or $a0, $a2, $a0
+; LA32-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_size_6:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 6
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_size_6:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.w $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.w $a3, $a1, 0
+; LA64-UAL-NEXT:    ld.hu $a0, $a0, 4
+; LA64-UAL-NEXT:    ld.hu $a1, $a1, 4
+; LA64-UAL-NEXT:    xor $a2, $a2, $a3
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    or $a0, $a2, $a0
+; LA64-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: bcmp_size_6:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 6
+; LA32-NUAL-NEXT:    bl bcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_size_6:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 6
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 6)
   ret i32 %bcmp
 }
 
 define i32 @bcmp_size_7(ptr %s1, ptr %s2) nounwind optsize {
-; LA32-LABEL: bcmp_size_7:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 7
-; LA32-NEXT:    bl bcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: bcmp_size_7:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a2, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a3, $a1, 0
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 3
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 3
+; LA32-UAL-NEXT:    xor $a2, $a2, $a3
+; LA32-UAL-NEXT:    xor $a0, $a0, $a1
+; LA32-UAL-NEXT:    or $a0, $a2, $a0
+; LA32-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_size_7:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 7
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_size_7:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.w $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.w $a3, $a1, 0
+; LA64-UAL-NEXT:    ld.w $a0, $a0, 3
+; LA64-UAL-NEXT:    ld.w $a1, $a1, 3
+; LA64-UAL-NEXT:    xor $a2, $a2, $a3
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    or $a0, $a2, $a0
+; LA64-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: bcmp_size_7:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 7
+; LA32-NUAL-NEXT:    bl bcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_size_7:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 7
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 7)
   ret i32 %bcmp
 }
 
 define i32 @bcmp_size_8(ptr %s1, ptr %s2) nounwind optsize {
-; LA32-LABEL: bcmp_size_8:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 8
-; LA32-NEXT:    bl bcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: bcmp_size_8:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a2, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a3, $a1, 0
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 4
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 4
+; LA32-UAL-NEXT:    xor $a2, $a2, $a3
+; LA32-UAL-NEXT:    xor $a0, $a0, $a1
+; LA32-UAL-NEXT:    or $a0, $a2, $a0
+; LA32-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_size_8:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 8
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_size_8:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.d $a0, $a0, 0
+; LA64-UAL-NEXT:    ld.d $a1, $a1, 0
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: bcmp_size_8:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 8
+; LA32-NUAL-NEXT:    bl bcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_size_8:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 8
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 8)
   ret i32 %bcmp
 }
 
 define i32 @bcmp_size_15(ptr %s1, ptr %s2) nounwind optsize {
-; LA32-LABEL: bcmp_size_15:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 15
-; LA32-NEXT:    bl bcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: bcmp_size_15:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a2, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a3, $a1, 0
+; LA32-UAL-NEXT:    ld.w $a4, $a0, 4
+; LA32-UAL-NEXT:    ld.w $a5, $a1, 4
+; LA32-UAL-NEXT:    ld.w $a6, $a0, 8
+; LA32-UAL-NEXT:    ld.w $a7, $a1, 8
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 11
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 11
+; LA32-UAL-NEXT:    xor $a2, $a2, $a3
+; LA32-UAL-NEXT:    xor $a3, $a4, $a5
+; LA32-UAL-NEXT:    xor $a4, $a6, $a7
+; LA32-UAL-NEXT:    xor $a0, $a0, $a1
+; LA32-UAL-NEXT:    or $a1, $a2, $a3
+; LA32-UAL-NEXT:    or $a0, $a4, $a0
+; LA32-UAL-NEXT:    or $a0, $a1, $a0
+; LA32-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_size_15:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 15
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_size_15:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 0
+; LA64-UAL-NEXT:    ld.d $a0, $a0, 7
+; LA64-UAL-NEXT:    ld.d $a1, $a1, 7
+; LA64-UAL-NEXT:    xor $a2, $a2, $a3
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    or $a0, $a2, $a0
+; LA64-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: bcmp_size_15:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 15
+; LA32-NUAL-NEXT:    bl bcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_size_15:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 15
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 15)
   ret i32 %bcmp
 }
 
 define i32 @bcmp_size_16(ptr %s1, ptr %s2) nounwind optsize {
-; LA32-LABEL: bcmp_size_16:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 16
-; LA32-NEXT:    bl bcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: bcmp_size_16:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a2, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a3, $a1, 0
+; LA32-UAL-NEXT:    ld.w $a4, $a0, 4
+; LA32-UAL-NEXT:    ld.w $a5, $a1, 4
+; LA32-UAL-NEXT:    ld.w $a6, $a0, 8
+; LA32-UAL-NEXT:    ld.w $a7, $a1, 8
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 12
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 12
+; LA32-UAL-NEXT:    xor $a2, $a2, $a3
+; LA32-UAL-NEXT:    xor $a3, $a4, $a5
+; LA32-UAL-NEXT:    xor $a4, $a6, $a7
+; LA32-UAL-NEXT:    xor $a0, $a0, $a1
+; LA32-UAL-NEXT:    or $a1, $a2, $a3
+; LA32-UAL-NEXT:    or $a0, $a4, $a0
+; LA32-UAL-NEXT:    or $a0, $a1, $a0
+; LA32-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_size_16:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 16
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_size_16:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 0
+; LA64-UAL-NEXT:    ld.d $a0, $a0, 8
+; LA64-UAL-NEXT:    ld.d $a1, $a1, 8
+; LA64-UAL-NEXT:    xor $a2, $a2, $a3
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    or $a0, $a2, $a0
+; LA64-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: bcmp_size_16:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 16
+; LA32-NUAL-NEXT:    bl bcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_size_16:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 16
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 16)
   ret i32 %bcmp
@@ -308,16 +536,36 @@ define i32 @bcmp_size_31(ptr %s1, ptr %s2) nounwind optsize {
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_size_31:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 31
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_size_31:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 0
+; LA64-UAL-NEXT:    ld.d $a4, $a0, 8
+; LA64-UAL-NEXT:    ld.d $a5, $a1, 8
+; LA64-UAL-NEXT:    ld.d $a6, $a0, 16
+; LA64-UAL-NEXT:    ld.d $a7, $a1, 16
+; LA64-UAL-NEXT:    ld.d $a0, $a0, 23
+; LA64-UAL-NEXT:    ld.d $a1, $a1, 23
+; LA64-UAL-NEXT:    xor $a2, $a2, $a3
+; LA64-UAL-NEXT:    xor $a3, $a4, $a5
+; LA64-UAL-NEXT:    xor $a4, $a6, $a7
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    or $a1, $a2, $a3
+; LA64-UAL-NEXT:    or $a0, $a4, $a0
+; LA64-UAL-NEXT:    or $a0, $a1, $a0
+; LA64-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA64-UAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_size_31:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 31
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 31)
   ret i32 %bcmp
@@ -334,16 +582,36 @@ define i32 @bcmp_size_32(ptr %s1, ptr %s2) nounwind optsize {
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_size_32:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 32
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_size_32:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 0
+; LA64-UAL-NEXT:    ld.d $a4, $a0, 8
+; LA64-UAL-NEXT:    ld.d $a5, $a1, 8
+; LA64-UAL-NEXT:    ld.d $a6, $a0, 16
+; LA64-UAL-NEXT:    ld.d $a7, $a1, 16
+; LA64-UAL-NEXT:    ld.d $a0, $a0, 24
+; LA64-UAL-NEXT:    ld.d $a1, $a1, 24
+; LA64-UAL-NEXT:    xor $a2, $a2, $a3
+; LA64-UAL-NEXT:    xor $a3, $a4, $a5
+; LA64-UAL-NEXT:    xor $a4, $a6, $a7
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    or $a1, $a2, $a3
+; LA64-UAL-NEXT:    or $a0, $a4, $a0
+; LA64-UAL-NEXT:    or $a0, $a1, $a0
+; LA64-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA64-UAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_size_32:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 32
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 32)
   ret i32 %bcmp
@@ -478,28 +746,44 @@ entry:
 }
 
 define i1 @bcmp_eq_zero(ptr %s1, ptr %s2) nounwind optsize {
-; LA32-LABEL: bcmp_eq_zero:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 4
-; LA32-NEXT:    bl bcmp
-; LA32-NEXT:    sltui $a0, $a0, 1
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: bcmp_eq_zero:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 0
+; LA32-UAL-NEXT:    xor $a0, $a0, $a1
+; LA32-UAL-NEXT:    sltui $a0, $a0, 1
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_eq_zero:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 4
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    sltui $a0, $a0, 1
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_eq_zero:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA64-UAL-NEXT:    ld.w $a1, $a1, 0
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    sltui $a0, $a0, 1
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: bcmp_eq_zero:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 4
+; LA32-NUAL-NEXT:    bl bcmp
+; LA32-NUAL-NEXT:    sltui $a0, $a0, 1
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_eq_zero:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 4
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    sltui $a0, $a0, 1
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 4)
   %ret = icmp eq i32 %bcmp, 0
@@ -507,28 +791,38 @@ entry:
 }
 
 define i1 @bcmp_lt_zero(ptr %s1, ptr %s2) nounwind optsize {
-; LA32-LABEL: bcmp_lt_zero:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 4
-; LA32-NEXT:    bl bcmp
-; LA32-NEXT:    srli.w $a0, $a0, 31
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: bcmp_lt_zero:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    move $a0, $zero
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_lt_zero:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 4
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    slti $a0, $a0, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_lt_zero:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    move $a0, $zero
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: bcmp_lt_zero:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 4
+; LA32-NUAL-NEXT:    bl bcmp
+; LA32-NUAL-NEXT:    srli.w $a0, $a0, 31
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_lt_zero:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 4
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    slti $a0, $a0, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 4)
   %ret = icmp slt i32 %bcmp, 0
@@ -536,28 +830,44 @@ entry:
 }
 
 define i1 @bcmp_gt_zero(ptr %s1, ptr %s2) nounwind optsize {
-; LA32-LABEL: bcmp_gt_zero:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 4
-; LA32-NEXT:    bl bcmp
-; LA32-NEXT:    slt $a0, $zero, $a0
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: bcmp_gt_zero:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 0
+; LA32-UAL-NEXT:    xor $a0, $a0, $a1
+; LA32-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_gt_zero:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 4
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    slt $a0, $zero, $a0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_gt_zero:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA64-UAL-NEXT:    ld.w $a1, $a1, 0
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: bcmp_gt_zero:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 4
+; LA32-NUAL-NEXT:    bl bcmp
+; LA32-NUAL-NEXT:    slt $a0, $zero, $a0
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_gt_zero:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 4
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    slt $a0, $zero, $a0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 4)
   %ret = icmp sgt i32 %bcmp, 0
@@ -575,260 +885,916 @@ entry:
 }
 
 define i32 @memcmp_size_1(ptr %s1, ptr %s2) nounwind optsize {
-; LA32-LABEL: memcmp_size_1:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 1
-; LA32-NEXT:    bl memcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: memcmp_size_1:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.bu $a0, $a0, 0
+; LA32-UAL-NEXT:    ld.bu $a1, $a1, 0
+; LA32-UAL-NEXT:    sub.w $a0, $a0, $a1
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_size_1:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 1
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_size_1:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.bu $a0, $a0, 0
+; LA64-UAL-NEXT:    ld.bu $a1, $a1, 0
+; LA64-UAL-NEXT:    sub.d $a0, $a0, $a1
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: memcmp_size_1:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 1
+; LA32-NUAL-NEXT:    bl memcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_size_1:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 1
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 1)
   ret i32 %memcmp
 }
 
 define i32 @memcmp_size_2(ptr %s1, ptr %s2) nounwind optsize {
-; LA32-LABEL: memcmp_size_2:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 2
-; LA32-NEXT:    bl memcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: memcmp_size_2:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.hu $a0, $a0, 0
+; LA32-UAL-NEXT:    ld.hu $a1, $a1, 0
+; LA32-UAL-NEXT:    srli.w $a2, $a0, 8
+; LA32-UAL-NEXT:    slli.w $a0, $a0, 8
+; LA32-UAL-NEXT:    or $a0, $a0, $a2
+; LA32-UAL-NEXT:    srli.w $a2, $a1, 8
+; LA32-UAL-NEXT:    slli.w $a1, $a1, 8
+; LA32-UAL-NEXT:    or $a1, $a1, $a2
+; LA32-UAL-NEXT:    lu12i.w $a2, 15
+; LA32-UAL-NEXT:    ori $a2, $a2, 4095
+; LA32-UAL-NEXT:    and $a0, $a0, $a2
+; LA32-UAL-NEXT:    and $a1, $a1, $a2
+; LA32-UAL-NEXT:    sub.w $a0, $a0, $a1
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_size_2:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 2
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_size_2:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.h $a0, $a0, 0
+; LA64-UAL-NEXT:    ld.h $a1, $a1, 0
+; LA64-UAL-NEXT:    revb.2h $a0, $a0
+; LA64-UAL-NEXT:    revb.2h $a1, $a1
+; LA64-UAL-NEXT:    bstrpick.d $a0, $a0, 15, 0
+; LA64-UAL-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-UAL-NEXT:    sub.d $a0, $a0, $a1
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: memcmp_size_2:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 2
+; LA32-NUAL-NEXT:    bl memcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_size_2:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 2
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 2)
   ret i32 %memcmp
 }
 
 define i32 @memcmp_size_3(ptr %s1, ptr %s2) nounwind optsize {
-; LA32-LABEL: memcmp_size_3:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 3
-; LA32-NEXT:    bl memcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: memcmp_size_3:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.hu $a2, $a0, 0
+; LA32-UAL-NEXT:    ld.hu $a3, $a1, 0
+; LA32-UAL-NEXT:    srli.w $a4, $a2, 8
+; LA32-UAL-NEXT:    slli.w $a2, $a2, 8
+; LA32-UAL-NEXT:    or $a2, $a2, $a4
+; LA32-UAL-NEXT:    lu12i.w $a4, 15
+; LA32-UAL-NEXT:    ori $a4, $a4, 4095
+; LA32-UAL-NEXT:    and $a2, $a2, $a4
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 8
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    and $a3, $a3, $a4
+; LA32-UAL-NEXT:    bne $a2, $a3, .LBB24_2
+; LA32-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA32-UAL-NEXT:    ld.bu $a0, $a0, 2
+; LA32-UAL-NEXT:    ld.bu $a1, $a1, 2
+; LA32-UAL-NEXT:    sub.w $a0, $a0, $a1
+; LA32-UAL-NEXT:    ret
+; LA32-UAL-NEXT:  .LBB24_2: # %res_block
+; LA32-UAL-NEXT:    sltu $a0, $a2, $a3
+; LA32-UAL-NEXT:    sub.w $a0, $zero, $a0
+; LA32-UAL-NEXT:    ori $a0, $a0, 1
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_size_3:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 3
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_size_3:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.h $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.h $a3, $a1, 0
+; LA64-UAL-NEXT:    revb.2h $a2, $a2
+; LA64-UAL-NEXT:    bstrpick.d $a2, $a2, 15, 0
+; LA64-UAL-NEXT:    revb.2h $a3, $a3
+; LA64-UAL-NEXT:    bstrpick.d $a3, $a3, 15, 0
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB24_2
+; LA64-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA64-UAL-NEXT:    ld.bu $a0, $a0, 2
+; LA64-UAL-NEXT:    ld.bu $a1, $a1, 2
+; LA64-UAL-NEXT:    sub.d $a0, $a0, $a1
+; LA64-UAL-NEXT:    ret
+; LA64-UAL-NEXT:  .LBB24_2: # %res_block
+; LA64-UAL-NEXT:    sltu $a0, $a2, $a3
+; LA64-UAL-NEXT:    sub.d $a0, $zero, $a0
+; LA64-UAL-NEXT:    ori $a0, $a0, 1
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: memcmp_size_3:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 3
+; LA32-NUAL-NEXT:    bl memcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_size_3:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 3
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 3)
   ret i32 %memcmp
 }
 
 define i32 @memcmp_size_4(ptr %s1, ptr %s2) nounwind optsize {
-; LA32-LABEL: memcmp_size_4:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 4
-; LA32-NEXT:    bl memcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: memcmp_size_4:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 0
+; LA32-UAL-NEXT:    srli.w $a2, $a0, 8
+; LA32-UAL-NEXT:    lu12i.w $a3, 15
+; LA32-UAL-NEXT:    ori $a3, $a3, 3840
+; LA32-UAL-NEXT:    and $a2, $a2, $a3
+; LA32-UAL-NEXT:    srli.w $a4, $a0, 24
+; LA32-UAL-NEXT:    or $a2, $a2, $a4
+; LA32-UAL-NEXT:    and $a4, $a0, $a3
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 8
+; LA32-UAL-NEXT:    slli.w $a0, $a0, 24
+; LA32-UAL-NEXT:    or $a0, $a0, $a4
+; LA32-UAL-NEXT:    or $a0, $a0, $a2
+; LA32-UAL-NEXT:    srli.w $a2, $a1, 8
+; LA32-UAL-NEXT:    and $a2, $a2, $a3
+; LA32-UAL-NEXT:    srli.w $a4, $a1, 24
+; LA32-UAL-NEXT:    or $a2, $a2, $a4
+; LA32-UAL-NEXT:    and $a3, $a1, $a3
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 8
+; LA32-UAL-NEXT:    slli.w $a1, $a1, 24
+; LA32-UAL-NEXT:    or $a1, $a1, $a3
+; LA32-UAL-NEXT:    or $a1, $a1, $a2
+; LA32-UAL-NEXT:    sltu $a2, $a0, $a1
+; LA32-UAL-NEXT:    sltu $a0, $a1, $a0
+; LA32-UAL-NEXT:    sub.w $a0, $a0, $a2
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_size_4:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 4
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_size_4:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA64-UAL-NEXT:    ld.w $a1, $a1, 0
+; LA64-UAL-NEXT:    revb.2w $a0, $a0
+; LA64-UAL-NEXT:    addi.w $a0, $a0, 0
+; LA64-UAL-NEXT:    revb.2w $a1, $a1
+; LA64-UAL-NEXT:    addi.w $a1, $a1, 0
+; LA64-UAL-NEXT:    sltu $a2, $a0, $a1
+; LA64-UAL-NEXT:    sltu $a0, $a1, $a0
+; LA64-UAL-NEXT:    sub.d $a0, $a0, $a2
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: memcmp_size_4:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 4
+; LA32-NUAL-NEXT:    bl memcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_size_4:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 4
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 4)
   ret i32 %memcmp
 }
 
 define i32 @memcmp_size_5(ptr %s1, ptr %s2) nounwind optsize {
-; LA32-LABEL: memcmp_size_5:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 5
-; LA32-NEXT:    bl memcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: memcmp_size_5:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a2, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a3, $a1, 0
+; LA32-UAL-NEXT:    srli.w $a4, $a2, 8
+; LA32-UAL-NEXT:    lu12i.w $a5, 15
+; LA32-UAL-NEXT:    ori $a5, $a5, 3840
+; LA32-UAL-NEXT:    and $a4, $a4, $a5
+; LA32-UAL-NEXT:    srli.w $a6, $a2, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    and $a6, $a2, $a5
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a2, $a2, 24
+; LA32-UAL-NEXT:    or $a2, $a2, $a6
+; LA32-UAL-NEXT:    or $a2, $a2, $a4
+; LA32-UAL-NEXT:    srli.w $a4, $a3, 8
+; LA32-UAL-NEXT:    and $a4, $a4, $a5
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    and $a5, $a3, $a5
+; LA32-UAL-NEXT:    slli.w $a5, $a5, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    or $a3, $a3, $a4
+; LA32-UAL-NEXT:    bne $a2, $a3, .LBB26_2
+; LA32-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA32-UAL-NEXT:    ld.bu $a0, $a0, 4
+; LA32-UAL-NEXT:    ld.bu $a1, $a1, 4
+; LA32-UAL-NEXT:    sub.w $a0, $a0, $a1
+; LA32-UAL-NEXT:    ret
+; LA32-UAL-NEXT:  .LBB26_2: # %res_block
+; LA32-UAL-NEXT:    sltu $a0, $a2, $a3
+; LA32-UAL-NEXT:    sub.w $a0, $zero, $a0
+; LA32-UAL-NEXT:    ori $a0, $a0, 1
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_size_5:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 5
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_size_5:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.w $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.w $a3, $a1, 0
+; LA64-UAL-NEXT:    revb.2w $a2, $a2
+; LA64-UAL-NEXT:    addi.w $a2, $a2, 0
+; LA64-UAL-NEXT:    revb.2w $a3, $a3
+; LA64-UAL-NEXT:    addi.w $a3, $a3, 0
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB26_2
+; LA64-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA64-UAL-NEXT:    ld.bu $a0, $a0, 4
+; LA64-UAL-NEXT:    ld.bu $a1, $a1, 4
+; LA64-UAL-NEXT:    sub.d $a0, $a0, $a1
+; LA64-UAL-NEXT:    ret
+; LA64-UAL-NEXT:  .LBB26_2: # %res_block
+; LA64-UAL-NEXT:    sltu $a0, $a2, $a3
+; LA64-UAL-NEXT:    sub.d $a0, $zero, $a0
+; LA64-UAL-NEXT:    ori $a0, $a0, 1
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: memcmp_size_5:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 5
+; LA32-NUAL-NEXT:    bl memcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_size_5:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 5
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 5)
   ret i32 %memcmp
 }
 
 define i32 @memcmp_size_6(ptr %s1, ptr %s2) nounwind optsize {
-; LA32-LABEL: memcmp_size_6:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 6
-; LA32-NEXT:    bl memcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: memcmp_size_6:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 0
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    lu12i.w $a2, 15
+; LA32-UAL-NEXT:    ori $a6, $a2, 3840
+; LA32-UAL-NEXT:    and $a5, $a5, $a6
+; LA32-UAL-NEXT:    srli.w $a7, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a7
+; LA32-UAL-NEXT:    and $a7, $a3, $a6
+; LA32-UAL-NEXT:    slli.w $a7, $a7, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a7
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a6
+; LA32-UAL-NEXT:    srli.w $a7, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a7
+; LA32-UAL-NEXT:    and $a6, $a4, $a6
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB27_3
+; LA32-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA32-UAL-NEXT:    ld.hu $a0, $a0, 4
+; LA32-UAL-NEXT:    ld.hu $a1, $a1, 4
+; LA32-UAL-NEXT:    srli.w $a3, $a0, 8
+; LA32-UAL-NEXT:    slli.w $a0, $a0, 8
+; LA32-UAL-NEXT:    or $a0, $a0, $a3
+; LA32-UAL-NEXT:    srli.w $a3, $a1, 8
+; LA32-UAL-NEXT:    slli.w $a1, $a1, 8
+; LA32-UAL-NEXT:    or $a1, $a1, $a3
+; LA32-UAL-NEXT:    ori $a2, $a2, 4095
+; LA32-UAL-NEXT:    and $a3, $a0, $a2
+; LA32-UAL-NEXT:    and $a4, $a1, $a2
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB27_3
+; LA32-UAL-NEXT:  # %bb.2:
+; LA32-UAL-NEXT:    move $a0, $zero
+; LA32-UAL-NEXT:    ret
+; LA32-UAL-NEXT:  .LBB27_3: # %res_block
+; LA32-UAL-NEXT:    sltu $a0, $a3, $a4
+; LA32-UAL-NEXT:    sub.w $a0, $zero, $a0
+; LA32-UAL-NEXT:    ori $a0, $a0, 1
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_size_6:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 6
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_size_6:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.w $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.w $a3, $a1, 0
+; LA64-UAL-NEXT:    revb.2w $a2, $a2
+; LA64-UAL-NEXT:    addi.w $a4, $a2, 0
+; LA64-UAL-NEXT:    revb.2w $a3, $a3
+; LA64-UAL-NEXT:    addi.w $a5, $a3, 0
+; LA64-UAL-NEXT:    bne $a4, $a5, .LBB27_3
+; LA64-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA64-UAL-NEXT:    ld.h $a0, $a0, 4
+; LA64-UAL-NEXT:    ld.h $a1, $a1, 4
+; LA64-UAL-NEXT:    revb.2h $a0, $a0
+; LA64-UAL-NEXT:    revb.2h $a1, $a1
+; LA64-UAL-NEXT:    bstrpick.d $a2, $a0, 15, 0
+; LA64-UAL-NEXT:    bstrpick.d $a3, $a1, 15, 0
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB27_3
+; LA64-UAL-NEXT:  # %bb.2:
+; LA64-UAL-NEXT:    move $a0, $zero
+; LA64-UAL-NEXT:    ret
+; LA64-UAL-NEXT:  .LBB27_3: # %res_block
+; LA64-UAL-NEXT:    addi.w $a0, $a3, 0
+; LA64-UAL-NEXT:    addi.w $a1, $a2, 0
+; LA64-UAL-NEXT:    sltu $a0, $a1, $a0
+; LA64-UAL-NEXT:    sub.d $a0, $zero, $a0
+; LA64-UAL-NEXT:    ori $a0, $a0, 1
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: memcmp_size_6:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 6
+; LA32-NUAL-NEXT:    bl memcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_size_6:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 6
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 6)
   ret i32 %memcmp
 }
 
 define i32 @memcmp_size_7(ptr %s1, ptr %s2) nounwind optsize {
-; LA32-LABEL: memcmp_size_7:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 7
-; LA32-NEXT:    bl memcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: memcmp_size_7:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 0
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    lu12i.w $a2, 15
+; LA32-UAL-NEXT:    ori $a2, $a2, 3840
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a3, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a6
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a4, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB28_3
+; LA32-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 3
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 3
+; LA32-UAL-NEXT:    srli.w $a3, $a0, 8
+; LA32-UAL-NEXT:    and $a3, $a3, $a2
+; LA32-UAL-NEXT:    srli.w $a4, $a0, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a4
+; LA32-UAL-NEXT:    and $a4, $a0, $a2
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 8
+; LA32-UAL-NEXT:    slli.w $a0, $a0, 24
+; LA32-UAL-NEXT:    or $a0, $a0, $a4
+; LA32-UAL-NEXT:    or $a3, $a0, $a3
+; LA32-UAL-NEXT:    srli.w $a0, $a1, 8
+; LA32-UAL-NEXT:    and $a0, $a0, $a2
+; LA32-UAL-NEXT:    srli.w $a4, $a1, 24
+; LA32-UAL-NEXT:    or $a0, $a0, $a4
+; LA32-UAL-NEXT:    and $a2, $a1, $a2
+; LA32-UAL-NEXT:    slli.w $a2, $a2, 8
+; LA32-UAL-NEXT:    slli.w $a1, $a1, 24
+; LA32-UAL-NEXT:    or $a1, $a1, $a2
+; LA32-UAL-NEXT:    or $a4, $a1, $a0
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB28_3
+; LA32-UAL-NEXT:  # %bb.2:
+; LA32-UAL-NEXT:    move $a0, $zero
+; LA32-UAL-NEXT:    ret
+; LA32-UAL-NEXT:  .LBB28_3: # %res_block
+; LA32-UAL-NEXT:    sltu $a0, $a3, $a4
+; LA32-UAL-NEXT:    sub.w $a0, $zero, $a0
+; LA32-UAL-NEXT:    ori $a0, $a0, 1
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_size_7:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 7
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_size_7:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.w $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.w $a3, $a1, 0
+; LA64-UAL-NEXT:    revb.2w $a2, $a2
+; LA64-UAL-NEXT:    addi.w $a4, $a2, 0
+; LA64-UAL-NEXT:    revb.2w $a3, $a3
+; LA64-UAL-NEXT:    addi.w $a5, $a3, 0
+; LA64-UAL-NEXT:    bne $a4, $a5, .LBB28_3
+; LA64-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA64-UAL-NEXT:    ld.w $a0, $a0, 3
+; LA64-UAL-NEXT:    ld.w $a1, $a1, 3
+; LA64-UAL-NEXT:    revb.2w $a2, $a0
+; LA64-UAL-NEXT:    addi.w $a0, $a2, 0
+; LA64-UAL-NEXT:    revb.2w $a3, $a1
+; LA64-UAL-NEXT:    addi.w $a1, $a3, 0
+; LA64-UAL-NEXT:    bne $a0, $a1, .LBB28_3
+; LA64-UAL-NEXT:  # %bb.2:
+; LA64-UAL-NEXT:    move $a0, $zero
+; LA64-UAL-NEXT:    ret
+; LA64-UAL-NEXT:  .LBB28_3: # %res_block
+; LA64-UAL-NEXT:    addi.w $a0, $a3, 0
+; LA64-UAL-NEXT:    addi.w $a1, $a2, 0
+; LA64-UAL-NEXT:    sltu $a0, $a1, $a0
+; LA64-UAL-NEXT:    sub.d $a0, $zero, $a0
+; LA64-UAL-NEXT:    ori $a0, $a0, 1
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: memcmp_size_7:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 7
+; LA32-NUAL-NEXT:    bl memcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_size_7:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 7
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 7)
   ret i32 %memcmp
 }
 
 define i32 @memcmp_size_8(ptr %s1, ptr %s2) nounwind optsize {
-; LA32-LABEL: memcmp_size_8:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 8
-; LA32-NEXT:    bl memcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: memcmp_size_8:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 0
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    lu12i.w $a2, 15
+; LA32-UAL-NEXT:    ori $a2, $a2, 3840
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a3, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a6
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a4, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB29_3
+; LA32-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 4
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 4
+; LA32-UAL-NEXT:    srli.w $a3, $a0, 8
+; LA32-UAL-NEXT:    and $a3, $a3, $a2
+; LA32-UAL-NEXT:    srli.w $a4, $a0, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a4
+; LA32-UAL-NEXT:    and $a4, $a0, $a2
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 8
+; LA32-UAL-NEXT:    slli.w $a0, $a0, 24
+; LA32-UAL-NEXT:    or $a0, $a0, $a4
+; LA32-UAL-NEXT:    or $a3, $a0, $a3
+; LA32-UAL-NEXT:    srli.w $a0, $a1, 8
+; LA32-UAL-NEXT:    and $a0, $a0, $a2
+; LA32-UAL-NEXT:    srli.w $a4, $a1, 24
+; LA32-UAL-NEXT:    or $a0, $a0, $a4
+; LA32-UAL-NEXT:    and $a2, $a1, $a2
+; LA32-UAL-NEXT:    slli.w $a2, $a2, 8
+; LA32-UAL-NEXT:    slli.w $a1, $a1, 24
+; LA32-UAL-NEXT:    or $a1, $a1, $a2
+; LA32-UAL-NEXT:    or $a4, $a1, $a0
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB29_3
+; LA32-UAL-NEXT:  # %bb.2:
+; LA32-UAL-NEXT:    move $a0, $zero
+; LA32-UAL-NEXT:    ret
+; LA32-UAL-NEXT:  .LBB29_3: # %res_block
+; LA32-UAL-NEXT:    sltu $a0, $a3, $a4
+; LA32-UAL-NEXT:    sub.w $a0, $zero, $a0
+; LA32-UAL-NEXT:    ori $a0, $a0, 1
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_size_8:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 8
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_size_8:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.d $a0, $a0, 0
+; LA64-UAL-NEXT:    ld.d $a1, $a1, 0
+; LA64-UAL-NEXT:    revb.d $a0, $a0
+; LA64-UAL-NEXT:    revb.d $a1, $a1
+; LA64-UAL-NEXT:    sltu $a2, $a0, $a1
+; LA64-UAL-NEXT:    sltu $a0, $a1, $a0
+; LA64-UAL-NEXT:    sub.d $a0, $a0, $a2
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: memcmp_size_8:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 8
+; LA32-NUAL-NEXT:    bl memcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_size_8:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 8
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 8)
   ret i32 %memcmp
 }
 
 define i32 @memcmp_size_15(ptr %s1, ptr %s2) nounwind optsize {
-; LA32-LABEL: memcmp_size_15:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 15
-; LA32-NEXT:    bl memcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: memcmp_size_15:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 0
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    lu12i.w $a2, 15
+; LA32-UAL-NEXT:    ori $a2, $a2, 3840
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a3, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a6
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a4, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB30_5
+; LA32-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 4
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 4
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a3, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a6
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a4, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB30_5
+; LA32-UAL-NEXT:  # %bb.2: # %loadbb2
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 8
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 8
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a3, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a6
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a4, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB30_5
+; LA32-UAL-NEXT:  # %bb.3: # %loadbb3
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 11
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 11
+; LA32-UAL-NEXT:    srli.w $a3, $a0, 8
+; LA32-UAL-NEXT:    and $a3, $a3, $a2
+; LA32-UAL-NEXT:    srli.w $a4, $a0, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a4
+; LA32-UAL-NEXT:    and $a4, $a0, $a2
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 8
+; LA32-UAL-NEXT:    slli.w $a0, $a0, 24
+; LA32-UAL-NEXT:    or $a0, $a0, $a4
+; LA32-UAL-NEXT:    or $a3, $a0, $a3
+; LA32-UAL-NEXT:    srli.w $a0, $a1, 8
+; LA32-UAL-NEXT:    and $a0, $a0, $a2
+; LA32-UAL-NEXT:    srli.w $a4, $a1, 24
+; LA32-UAL-NEXT:    or $a0, $a0, $a4
+; LA32-UAL-NEXT:    and $a2, $a1, $a2
+; LA32-UAL-NEXT:    slli.w $a2, $a2, 8
+; LA32-UAL-NEXT:    slli.w $a1, $a1, 24
+; LA32-UAL-NEXT:    or $a1, $a1, $a2
+; LA32-UAL-NEXT:    or $a4, $a1, $a0
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB30_5
+; LA32-UAL-NEXT:  # %bb.4:
+; LA32-UAL-NEXT:    move $a0, $zero
+; LA32-UAL-NEXT:    ret
+; LA32-UAL-NEXT:  .LBB30_5: # %res_block
+; LA32-UAL-NEXT:    sltu $a0, $a3, $a4
+; LA32-UAL-NEXT:    sub.w $a0, $zero, $a0
+; LA32-UAL-NEXT:    ori $a0, $a0, 1
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_size_15:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 15
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_size_15:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 0
+; LA64-UAL-NEXT:    revb.d $a2, $a2
+; LA64-UAL-NEXT:    revb.d $a3, $a3
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB30_3
+; LA64-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA64-UAL-NEXT:    ld.d $a0, $a0, 7
+; LA64-UAL-NEXT:    ld.d $a1, $a1, 7
+; LA64-UAL-NEXT:    revb.d $a2, $a0
+; LA64-UAL-NEXT:    revb.d $a3, $a1
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB30_3
+; LA64-UAL-NEXT:  # %bb.2:
+; LA64-UAL-NEXT:    move $a0, $zero
+; LA64-UAL-NEXT:    ret
+; LA64-UAL-NEXT:  .LBB30_3: # %res_block
+; LA64-UAL-NEXT:    sltu $a0, $a2, $a3
+; LA64-UAL-NEXT:    sub.d $a0, $zero, $a0
+; LA64-UAL-NEXT:    ori $a0, $a0, 1
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: memcmp_size_15:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 15
+; LA32-NUAL-NEXT:    bl memcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_size_15:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 15
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 15)
   ret i32 %memcmp
 }
 
 define i32 @memcmp_size_16(ptr %s1, ptr %s2) nounwind optsize {
-; LA32-LABEL: memcmp_size_16:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 16
-; LA32-NEXT:    bl memcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: memcmp_size_16:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 0
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    lu12i.w $a2, 15
+; LA32-UAL-NEXT:    ori $a2, $a2, 3840
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a3, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a6
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a4, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB31_5
+; LA32-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 4
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 4
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a3, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a6
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a4, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB31_5
+; LA32-UAL-NEXT:  # %bb.2: # %loadbb2
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 8
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 8
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a3, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a6
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a4, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB31_5
+; LA32-UAL-NEXT:  # %bb.3: # %loadbb3
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 12
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 12
+; LA32-UAL-NEXT:    srli.w $a3, $a0, 8
+; LA32-UAL-NEXT:    and $a3, $a3, $a2
+; LA32-UAL-NEXT:    srli.w $a4, $a0, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a4
+; LA32-UAL-NEXT:    and $a4, $a0, $a2
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 8
+; LA32-UAL-NEXT:    slli.w $a0, $a0, 24
+; LA32-UAL-NEXT:    or $a0, $a0, $a4
+; LA32-UAL-NEXT:    or $a3, $a0, $a3
+; LA32-UAL-NEXT:    srli.w $a0, $a1, 8
+; LA32-UAL-NEXT:    and $a0, $a0, $a2
+; LA32-UAL-NEXT:    srli.w $a4, $a1, 24
+; LA32-UAL-NEXT:    or $a0, $a0, $a4
+; LA32-UAL-NEXT:    and $a2, $a1, $a2
+; LA32-UAL-NEXT:    slli.w $a2, $a2, 8
+; LA32-UAL-NEXT:    slli.w $a1, $a1, 24
+; LA32-UAL-NEXT:    or $a1, $a1, $a2
+; LA32-UAL-NEXT:    or $a4, $a1, $a0
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB31_5
+; LA32-UAL-NEXT:  # %bb.4:
+; LA32-UAL-NEXT:    move $a0, $zero
+; LA32-UAL-NEXT:    ret
+; LA32-UAL-NEXT:  .LBB31_5: # %res_block
+; LA32-UAL-NEXT:    sltu $a0, $a3, $a4
+; LA32-UAL-NEXT:    sub.w $a0, $zero, $a0
+; LA32-UAL-NEXT:    ori $a0, $a0, 1
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_size_16:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 16
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_size_16:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 0
+; LA64-UAL-NEXT:    revb.d $a2, $a2
+; LA64-UAL-NEXT:    revb.d $a3, $a3
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB31_3
+; LA64-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA64-UAL-NEXT:    ld.d $a0, $a0, 8
+; LA64-UAL-NEXT:    ld.d $a1, $a1, 8
+; LA64-UAL-NEXT:    revb.d $a2, $a0
+; LA64-UAL-NEXT:    revb.d $a3, $a1
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB31_3
+; LA64-UAL-NEXT:  # %bb.2:
+; LA64-UAL-NEXT:    move $a0, $zero
+; LA64-UAL-NEXT:    ret
+; LA64-UAL-NEXT:  .LBB31_3: # %res_block
+; LA64-UAL-NEXT:    sltu $a0, $a2, $a3
+; LA64-UAL-NEXT:    sub.d $a0, $zero, $a0
+; LA64-UAL-NEXT:    ori $a0, $a0, 1
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: memcmp_size_16:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 16
+; LA32-NUAL-NEXT:    bl memcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_size_16:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 16
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 16)
   ret i32 %memcmp
@@ -845,16 +1811,50 @@ define i32 @memcmp_size_31(ptr %s1, ptr %s2) nounwind optsize {
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_size_31:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 31
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_size_31:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 0
+; LA64-UAL-NEXT:    revb.d $a2, $a2
+; LA64-UAL-NEXT:    revb.d $a3, $a3
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB32_5
+; LA64-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 8
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 8
+; LA64-UAL-NEXT:    revb.d $a2, $a2
+; LA64-UAL-NEXT:    revb.d $a3, $a3
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB32_5
+; LA64-UAL-NEXT:  # %bb.2: # %loadbb2
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 16
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 16
+; LA64-UAL-NEXT:    revb.d $a2, $a2
+; LA64-UAL-NEXT:    revb.d $a3, $a3
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB32_5
+; LA64-UAL-NEXT:  # %bb.3: # %loadbb3
+; LA64-UAL-NEXT:    ld.d $a0, $a0, 23
+; LA64-UAL-NEXT:    ld.d $a1, $a1, 23
+; LA64-UAL-NEXT:    revb.d $a2, $a0
+; LA64-UAL-NEXT:    revb.d $a3, $a1
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB32_5
+; LA64-UAL-NEXT:  # %bb.4:
+; LA64-UAL-NEXT:    move $a0, $zero
+; LA64-UAL-NEXT:    ret
+; LA64-UAL-NEXT:  .LBB32_5: # %res_block
+; LA64-UAL-NEXT:    sltu $a0, $a2, $a3
+; LA64-UAL-NEXT:    sub.d $a0, $zero, $a0
+; LA64-UAL-NEXT:    ori $a0, $a0, 1
+; LA64-UAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_size_31:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 31
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 31)
   ret i32 %memcmp
@@ -871,16 +1871,50 @@ define i32 @memcmp_size_32(ptr %s1, ptr %s2) nounwind optsize {
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_size_32:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 32
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_size_32:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 0
+; LA64-UAL-NEXT:    revb.d $a2, $a2
+; LA64-UAL-NEXT:    revb.d $a3, $a3
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB33_5
+; LA64-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 8
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 8
+; LA64-UAL-NEXT:    revb.d $a2, $a2
+; LA64-UAL-NEXT:    revb.d $a3, $a3
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB33_5
+; LA64-UAL-NEXT:  # %bb.2: # %loadbb2
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 16
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 16
+; LA64-UAL-NEXT:    revb.d $a2, $a2
+; LA64-UAL-NEXT:    revb.d $a3, $a3
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB33_5
+; LA64-UAL-NEXT:  # %bb.3: # %loadbb3
+; LA64-UAL-NEXT:    ld.d $a0, $a0, 24
+; LA64-UAL-NEXT:    ld.d $a1, $a1, 24
+; LA64-UAL-NEXT:    revb.d $a2, $a0
+; LA64-UAL-NEXT:    revb.d $a3, $a1
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB33_5
+; LA64-UAL-NEXT:  # %bb.4:
+; LA64-UAL-NEXT:    move $a0, $zero
+; LA64-UAL-NEXT:    ret
+; LA64-UAL-NEXT:  .LBB33_5: # %res_block
+; LA64-UAL-NEXT:    sltu $a0, $a2, $a3
+; LA64-UAL-NEXT:    sub.d $a0, $zero, $a0
+; LA64-UAL-NEXT:    ori $a0, $a0, 1
+; LA64-UAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_size_32:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 32
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 32)
   ret i32 %memcmp
@@ -1017,16 +2051,16 @@ entry:
 define i1 @memcmp_eq_zero(ptr %s1, ptr %s2) nounwind optsize {
 ; LA32-UAL-LABEL: memcmp_eq_zero:
 ; LA32-UAL:       # %bb.0: # %entry
-; LA32-UAL-NEXT:    ld.w $a1, $a1, 0
 ; LA32-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 0
 ; LA32-UAL-NEXT:    xor $a0, $a0, $a1
 ; LA32-UAL-NEXT:    sltui $a0, $a0, 1
 ; LA32-UAL-NEXT:    ret
 ;
 ; LA64-UAL-LABEL: memcmp_eq_zero:
 ; LA64-UAL:       # %bb.0: # %entry
-; LA64-UAL-NEXT:    ld.w $a1, $a1, 0
 ; LA64-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA64-UAL-NEXT:    ld.w $a1, $a1, 0
 ; LA64-UAL-NEXT:    xor $a0, $a0, $a1
 ; LA64-UAL-NEXT:    sltui $a0, $a0, 1
 ; LA64-UAL-NEXT:    ret
@@ -1089,28 +2123,66 @@ entry:
 }
 
 define i1 @memcmp_lt_zero(ptr %s1, ptr %s2) nounwind optsize {
-; LA32-LABEL: memcmp_lt_zero:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 4
-; LA32-NEXT:    bl memcmp
-; LA32-NEXT:    srli.w $a0, $a0, 31
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: memcmp_lt_zero:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 0
+; LA32-UAL-NEXT:    srli.w $a2, $a0, 8
+; LA32-UAL-NEXT:    lu12i.w $a3, 15
+; LA32-UAL-NEXT:    ori $a3, $a3, 3840
+; LA32-UAL-NEXT:    and $a2, $a2, $a3
+; LA32-UAL-NEXT:    srli.w $a4, $a0, 24
+; LA32-UAL-NEXT:    or $a2, $a2, $a4
+; LA32-UAL-NEXT:    and $a4, $a0, $a3
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 8
+; LA32-UAL-NEXT:    slli.w $a0, $a0, 24
+; LA32-UAL-NEXT:    or $a0, $a0, $a4
+; LA32-UAL-NEXT:    or $a0, $a0, $a2
+; LA32-UAL-NEXT:    srli.w $a2, $a1, 8
+; LA32-UAL-NEXT:    and $a2, $a2, $a3
+; LA32-UAL-NEXT:    srli.w $a4, $a1, 24
+; LA32-UAL-NEXT:    or $a2, $a2, $a4
+; LA32-UAL-NEXT:    and $a3, $a1, $a3
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 8
+; LA32-UAL-NEXT:    slli.w $a1, $a1, 24
+; LA32-UAL-NEXT:    or $a1, $a1, $a3
+; LA32-UAL-NEXT:    or $a1, $a1, $a2
+; LA32-UAL-NEXT:    sltu $a0, $a0, $a1
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_lt_zero:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 4
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    slti $a0, $a0, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_lt_zero:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA64-UAL-NEXT:    ld.w $a1, $a1, 0
+; LA64-UAL-NEXT:    revb.2w $a0, $a0
+; LA64-UAL-NEXT:    addi.w $a0, $a0, 0
+; LA64-UAL-NEXT:    revb.2w $a1, $a1
+; LA64-UAL-NEXT:    addi.w $a1, $a1, 0
+; LA64-UAL-NEXT:    sltu $a0, $a0, $a1
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: memcmp_lt_zero:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 4
+; LA32-NUAL-NEXT:    bl memcmp
+; LA32-NUAL-NEXT:    srli.w $a0, $a0, 31
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_lt_zero:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 4
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    slti $a0, $a0, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 4)
   %ret = icmp slt i32 %memcmp, 0
@@ -1118,28 +2190,66 @@ entry:
 }
 
 define i1 @memcmp_gt_zero(ptr %s1, ptr %s2) nounwind optsize {
-; LA32-LABEL: memcmp_gt_zero:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 4
-; LA32-NEXT:    bl memcmp
-; LA32-NEXT:    slt $a0, $zero, $a0
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: memcmp_gt_zero:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 0
+; LA32-UAL-NEXT:    srli.w $a2, $a0, 8
+; LA32-UAL-NEXT:    lu12i.w $a3, 15
+; LA32-UAL-NEXT:    ori $a3, $a3, 3840
+; LA32-UAL-NEXT:    and $a2, $a2, $a3
+; LA32-UAL-NEXT:    srli.w $a4, $a0, 24
+; LA32-UAL-NEXT:    or $a2, $a2, $a4
+; LA32-UAL-NEXT:    and $a4, $a0, $a3
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 8
+; LA32-UAL-NEXT:    slli.w $a0, $a0, 24
+; LA32-UAL-NEXT:    or $a0, $a0, $a4
+; LA32-UAL-NEXT:    or $a0, $a0, $a2
+; LA32-UAL-NEXT:    srli.w $a2, $a1, 8
+; LA32-UAL-NEXT:    and $a2, $a2, $a3
+; LA32-UAL-NEXT:    srli.w $a4, $a1, 24
+; LA32-UAL-NEXT:    or $a2, $a2, $a4
+; LA32-UAL-NEXT:    and $a3, $a1, $a3
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 8
+; LA32-UAL-NEXT:    slli.w $a1, $a1, 24
+; LA32-UAL-NEXT:    or $a1, $a1, $a3
+; LA32-UAL-NEXT:    or $a1, $a1, $a2
+; LA32-UAL-NEXT:    sltu $a0, $a1, $a0
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_gt_zero:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 4
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    slt $a0, $zero, $a0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_gt_zero:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA64-UAL-NEXT:    ld.w $a1, $a1, 0
+; LA64-UAL-NEXT:    revb.2w $a0, $a0
+; LA64-UAL-NEXT:    addi.w $a0, $a0, 0
+; LA64-UAL-NEXT:    revb.2w $a1, $a1
+; LA64-UAL-NEXT:    addi.w $a1, $a1, 0
+; LA64-UAL-NEXT:    sltu $a0, $a1, $a0
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: memcmp_gt_zero:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 4
+; LA32-NUAL-NEXT:    bl memcmp
+; LA32-NUAL-NEXT:    slt $a0, $zero, $a0
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_gt_zero:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 4
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    slt $a0, $zero, $a0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 4)
   %ret = icmp sgt i32 %memcmp, 0
diff --git a/llvm/test/CodeGen/LoongArch/expandmemcmp.ll b/llvm/test/CodeGen/LoongArch/expandmemcmp.ll
index 407ff1995cf8e..7772ed59081b0 100644
--- a/llvm/test/CodeGen/LoongArch/expandmemcmp.ll
+++ b/llvm/test/CodeGen/LoongArch/expandmemcmp.ll
@@ -38,312 +38,652 @@ entry:
 }
 
 define i32 @bcmp_size_1(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: bcmp_size_1:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 1
-; LA32-NEXT:    bl bcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: bcmp_size_1:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.bu $a0, $a0, 0
+; LA32-UAL-NEXT:    ld.bu $a1, $a1, 0
+; LA32-UAL-NEXT:    xor $a0, $a0, $a1
+; LA32-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_size_1:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 1
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_size_1:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.bu $a0, $a0, 0
+; LA64-UAL-NEXT:    ld.bu $a1, $a1, 0
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: bcmp_size_1:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 1
+; LA32-NUAL-NEXT:    bl bcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_size_1:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 1
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 1)
   ret i32 %bcmp
 }
 
 define i32 @bcmp_size_2(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: bcmp_size_2:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 2
-; LA32-NEXT:    bl bcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: bcmp_size_2:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.hu $a0, $a0, 0
+; LA32-UAL-NEXT:    ld.hu $a1, $a1, 0
+; LA32-UAL-NEXT:    xor $a0, $a0, $a1
+; LA32-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_size_2:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 2
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_size_2:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.hu $a0, $a0, 0
+; LA64-UAL-NEXT:    ld.hu $a1, $a1, 0
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: bcmp_size_2:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 2
+; LA32-NUAL-NEXT:    bl bcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_size_2:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 2
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 2)
   ret i32 %bcmp
 }
 
 define i32 @bcmp_size_3(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: bcmp_size_3:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 3
-; LA32-NEXT:    bl bcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: bcmp_size_3:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.hu $a2, $a0, 0
+; LA32-UAL-NEXT:    ld.hu $a3, $a1, 0
+; LA32-UAL-NEXT:    ld.bu $a0, $a0, 2
+; LA32-UAL-NEXT:    ld.bu $a1, $a1, 2
+; LA32-UAL-NEXT:    xor $a2, $a2, $a3
+; LA32-UAL-NEXT:    xor $a0, $a0, $a1
+; LA32-UAL-NEXT:    or $a0, $a2, $a0
+; LA32-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_size_3:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 3
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_size_3:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.hu $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.hu $a3, $a1, 0
+; LA64-UAL-NEXT:    ld.bu $a0, $a0, 2
+; LA64-UAL-NEXT:    ld.bu $a1, $a1, 2
+; LA64-UAL-NEXT:    xor $a2, $a2, $a3
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    or $a0, $a2, $a0
+; LA64-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: bcmp_size_3:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 3
+; LA32-NUAL-NEXT:    bl bcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_size_3:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 3
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 3)
   ret i32 %bcmp
 }
 
 define i32 @bcmp_size_4(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: bcmp_size_4:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 4
-; LA32-NEXT:    bl bcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: bcmp_size_4:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 0
+; LA32-UAL-NEXT:    xor $a0, $a0, $a1
+; LA32-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_size_4:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 4
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_size_4:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA64-UAL-NEXT:    ld.w $a1, $a1, 0
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: bcmp_size_4:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 4
+; LA32-NUAL-NEXT:    bl bcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_size_4:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 4
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 4)
   ret i32 %bcmp
 }
 
 define i32 @bcmp_size_5(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: bcmp_size_5:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 5
-; LA32-NEXT:    bl bcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: bcmp_size_5:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a2, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a3, $a1, 0
+; LA32-UAL-NEXT:    ld.bu $a0, $a0, 4
+; LA32-UAL-NEXT:    ld.bu $a1, $a1, 4
+; LA32-UAL-NEXT:    xor $a2, $a2, $a3
+; LA32-UAL-NEXT:    xor $a0, $a0, $a1
+; LA32-UAL-NEXT:    or $a0, $a2, $a0
+; LA32-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_size_5:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 5
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_size_5:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.w $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.w $a3, $a1, 0
+; LA64-UAL-NEXT:    ld.bu $a0, $a0, 4
+; LA64-UAL-NEXT:    ld.bu $a1, $a1, 4
+; LA64-UAL-NEXT:    xor $a2, $a2, $a3
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    or $a0, $a2, $a0
+; LA64-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: bcmp_size_5:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 5
+; LA32-NUAL-NEXT:    bl bcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_size_5:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 5
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 5)
   ret i32 %bcmp
 }
 
 define i32 @bcmp_size_6(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: bcmp_size_6:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 6
-; LA32-NEXT:    bl bcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: bcmp_size_6:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a2, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a3, $a1, 0
+; LA32-UAL-NEXT:    ld.hu $a0, $a0, 4
+; LA32-UAL-NEXT:    ld.hu $a1, $a1, 4
+; LA32-UAL-NEXT:    xor $a2, $a2, $a3
+; LA32-UAL-NEXT:    xor $a0, $a0, $a1
+; LA32-UAL-NEXT:    or $a0, $a2, $a0
+; LA32-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_size_6:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 6
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_size_6:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.w $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.w $a3, $a1, 0
+; LA64-UAL-NEXT:    ld.hu $a0, $a0, 4
+; LA64-UAL-NEXT:    ld.hu $a1, $a1, 4
+; LA64-UAL-NEXT:    xor $a2, $a2, $a3
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    or $a0, $a2, $a0
+; LA64-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: bcmp_size_6:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 6
+; LA32-NUAL-NEXT:    bl bcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_size_6:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 6
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 6)
   ret i32 %bcmp
 }
 
 define i32 @bcmp_size_7(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: bcmp_size_7:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 7
-; LA32-NEXT:    bl bcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: bcmp_size_7:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a2, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a3, $a1, 0
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 3
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 3
+; LA32-UAL-NEXT:    xor $a2, $a2, $a3
+; LA32-UAL-NEXT:    xor $a0, $a0, $a1
+; LA32-UAL-NEXT:    or $a0, $a2, $a0
+; LA32-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_size_7:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 7
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_size_7:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.w $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.w $a3, $a1, 0
+; LA64-UAL-NEXT:    ld.w $a0, $a0, 3
+; LA64-UAL-NEXT:    ld.w $a1, $a1, 3
+; LA64-UAL-NEXT:    xor $a2, $a2, $a3
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    or $a0, $a2, $a0
+; LA64-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: bcmp_size_7:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 7
+; LA32-NUAL-NEXT:    bl bcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_size_7:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 7
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 7)
   ret i32 %bcmp
 }
 
 define i32 @bcmp_size_8(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: bcmp_size_8:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 8
-; LA32-NEXT:    bl bcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: bcmp_size_8:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a2, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a3, $a1, 0
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 4
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 4
+; LA32-UAL-NEXT:    xor $a2, $a2, $a3
+; LA32-UAL-NEXT:    xor $a0, $a0, $a1
+; LA32-UAL-NEXT:    or $a0, $a2, $a0
+; LA32-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_size_8:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 8
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_size_8:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.d $a0, $a0, 0
+; LA64-UAL-NEXT:    ld.d $a1, $a1, 0
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: bcmp_size_8:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 8
+; LA32-NUAL-NEXT:    bl bcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_size_8:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 8
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 8)
   ret i32 %bcmp
 }
 
 define i32 @bcmp_size_15(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: bcmp_size_15:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 15
-; LA32-NEXT:    bl bcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: bcmp_size_15:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a2, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a3, $a1, 0
+; LA32-UAL-NEXT:    ld.w $a4, $a0, 4
+; LA32-UAL-NEXT:    ld.w $a5, $a1, 4
+; LA32-UAL-NEXT:    ld.w $a6, $a0, 8
+; LA32-UAL-NEXT:    ld.w $a7, $a1, 8
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 11
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 11
+; LA32-UAL-NEXT:    xor $a2, $a2, $a3
+; LA32-UAL-NEXT:    xor $a3, $a4, $a5
+; LA32-UAL-NEXT:    xor $a4, $a6, $a7
+; LA32-UAL-NEXT:    xor $a0, $a0, $a1
+; LA32-UAL-NEXT:    or $a1, $a2, $a3
+; LA32-UAL-NEXT:    or $a0, $a4, $a0
+; LA32-UAL-NEXT:    or $a0, $a1, $a0
+; LA32-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_size_15:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 15
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_size_15:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 0
+; LA64-UAL-NEXT:    ld.d $a0, $a0, 7
+; LA64-UAL-NEXT:    ld.d $a1, $a1, 7
+; LA64-UAL-NEXT:    xor $a2, $a2, $a3
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    or $a0, $a2, $a0
+; LA64-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: bcmp_size_15:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 15
+; LA32-NUAL-NEXT:    bl bcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_size_15:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 15
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 15)
   ret i32 %bcmp
 }
 
 define i32 @bcmp_size_16(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: bcmp_size_16:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 16
-; LA32-NEXT:    bl bcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: bcmp_size_16:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a2, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a3, $a1, 0
+; LA32-UAL-NEXT:    ld.w $a4, $a0, 4
+; LA32-UAL-NEXT:    ld.w $a5, $a1, 4
+; LA32-UAL-NEXT:    ld.w $a6, $a0, 8
+; LA32-UAL-NEXT:    ld.w $a7, $a1, 8
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 12
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 12
+; LA32-UAL-NEXT:    xor $a2, $a2, $a3
+; LA32-UAL-NEXT:    xor $a3, $a4, $a5
+; LA32-UAL-NEXT:    xor $a4, $a6, $a7
+; LA32-UAL-NEXT:    xor $a0, $a0, $a1
+; LA32-UAL-NEXT:    or $a1, $a2, $a3
+; LA32-UAL-NEXT:    or $a0, $a4, $a0
+; LA32-UAL-NEXT:    or $a0, $a1, $a0
+; LA32-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_size_16:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 16
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_size_16:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 0
+; LA64-UAL-NEXT:    ld.d $a0, $a0, 8
+; LA64-UAL-NEXT:    ld.d $a1, $a1, 8
+; LA64-UAL-NEXT:    xor $a2, $a2, $a3
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    or $a0, $a2, $a0
+; LA64-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: bcmp_size_16:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 16
+; LA32-NUAL-NEXT:    bl bcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_size_16:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 16
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 16)
   ret i32 %bcmp
 }
 
 define i32 @bcmp_size_31(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: bcmp_size_31:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 31
-; LA32-NEXT:    bl bcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: bcmp_size_31:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a2, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a3, $a1, 0
+; LA32-UAL-NEXT:    ld.w $a4, $a0, 4
+; LA32-UAL-NEXT:    ld.w $a5, $a1, 4
+; LA32-UAL-NEXT:    ld.w $a6, $a0, 8
+; LA32-UAL-NEXT:    ld.w $a7, $a1, 8
+; LA32-UAL-NEXT:    ld.w $t0, $a0, 12
+; LA32-UAL-NEXT:    ld.w $t1, $a1, 12
+; LA32-UAL-NEXT:    xor $a2, $a2, $a3
+; LA32-UAL-NEXT:    xor $a3, $a4, $a5
+; LA32-UAL-NEXT:    xor $a4, $a6, $a7
+; LA32-UAL-NEXT:    xor $a5, $t0, $t1
+; LA32-UAL-NEXT:    ld.w $a6, $a0, 16
+; LA32-UAL-NEXT:    ld.w $a7, $a1, 16
+; LA32-UAL-NEXT:    ld.w $t0, $a0, 20
+; LA32-UAL-NEXT:    ld.w $t1, $a1, 20
+; LA32-UAL-NEXT:    ld.w $t2, $a0, 24
+; LA32-UAL-NEXT:    ld.w $t3, $a1, 24
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 27
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 27
+; LA32-UAL-NEXT:    xor $a6, $a6, $a7
+; LA32-UAL-NEXT:    xor $a7, $t0, $t1
+; LA32-UAL-NEXT:    xor $t0, $t2, $t3
+; LA32-UAL-NEXT:    xor $a0, $a0, $a1
+; LA32-UAL-NEXT:    or $a1, $a2, $a3
+; LA32-UAL-NEXT:    or $a2, $a4, $a5
+; LA32-UAL-NEXT:    or $a3, $a6, $a7
+; LA32-UAL-NEXT:    or $a0, $t0, $a0
+; LA32-UAL-NEXT:    or $a1, $a1, $a2
+; LA32-UAL-NEXT:    or $a0, $a3, $a0
+; LA32-UAL-NEXT:    or $a0, $a1, $a0
+; LA32-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_size_31:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 31
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_size_31:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 0
+; LA64-UAL-NEXT:    ld.d $a4, $a0, 8
+; LA64-UAL-NEXT:    ld.d $a5, $a1, 8
+; LA64-UAL-NEXT:    ld.d $a6, $a0, 16
+; LA64-UAL-NEXT:    ld.d $a7, $a1, 16
+; LA64-UAL-NEXT:    ld.d $a0, $a0, 23
+; LA64-UAL-NEXT:    ld.d $a1, $a1, 23
+; LA64-UAL-NEXT:    xor $a2, $a2, $a3
+; LA64-UAL-NEXT:    xor $a3, $a4, $a5
+; LA64-UAL-NEXT:    xor $a4, $a6, $a7
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    or $a1, $a2, $a3
+; LA64-UAL-NEXT:    or $a0, $a4, $a0
+; LA64-UAL-NEXT:    or $a0, $a1, $a0
+; LA64-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: bcmp_size_31:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 31
+; LA32-NUAL-NEXT:    bl bcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_size_31:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 31
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 31)
   ret i32 %bcmp
 }
 
 define i32 @bcmp_size_32(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: bcmp_size_32:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 32
-; LA32-NEXT:    bl bcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: bcmp_size_32:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a2, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a3, $a1, 0
+; LA32-UAL-NEXT:    ld.w $a4, $a0, 4
+; LA32-UAL-NEXT:    ld.w $a5, $a1, 4
+; LA32-UAL-NEXT:    ld.w $a6, $a0, 8
+; LA32-UAL-NEXT:    ld.w $a7, $a1, 8
+; LA32-UAL-NEXT:    ld.w $t0, $a0, 12
+; LA32-UAL-NEXT:    ld.w $t1, $a1, 12
+; LA32-UAL-NEXT:    xor $a2, $a2, $a3
+; LA32-UAL-NEXT:    xor $a3, $a4, $a5
+; LA32-UAL-NEXT:    xor $a4, $a6, $a7
+; LA32-UAL-NEXT:    xor $a5, $t0, $t1
+; LA32-UAL-NEXT:    ld.w $a6, $a0, 16
+; LA32-UAL-NEXT:    ld.w $a7, $a1, 16
+; LA32-UAL-NEXT:    ld.w $t0, $a0, 20
+; LA32-UAL-NEXT:    ld.w $t1, $a1, 20
+; LA32-UAL-NEXT:    ld.w $t2, $a0, 24
+; LA32-UAL-NEXT:    ld.w $t3, $a1, 24
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 28
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 28
+; LA32-UAL-NEXT:    xor $a6, $a6, $a7
+; LA32-UAL-NEXT:    xor $a7, $t0, $t1
+; LA32-UAL-NEXT:    xor $t0, $t2, $t3
+; LA32-UAL-NEXT:    xor $a0, $a0, $a1
+; LA32-UAL-NEXT:    or $a1, $a2, $a3
+; LA32-UAL-NEXT:    or $a2, $a4, $a5
+; LA32-UAL-NEXT:    or $a3, $a6, $a7
+; LA32-UAL-NEXT:    or $a0, $t0, $a0
+; LA32-UAL-NEXT:    or $a1, $a1, $a2
+; LA32-UAL-NEXT:    or $a0, $a3, $a0
+; LA32-UAL-NEXT:    or $a0, $a1, $a0
+; LA32-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_size_32:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 32
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_size_32:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 0
+; LA64-UAL-NEXT:    ld.d $a4, $a0, 8
+; LA64-UAL-NEXT:    ld.d $a5, $a1, 8
+; LA64-UAL-NEXT:    ld.d $a6, $a0, 16
+; LA64-UAL-NEXT:    ld.d $a7, $a1, 16
+; LA64-UAL-NEXT:    ld.d $a0, $a0, 24
+; LA64-UAL-NEXT:    ld.d $a1, $a1, 24
+; LA64-UAL-NEXT:    xor $a2, $a2, $a3
+; LA64-UAL-NEXT:    xor $a3, $a4, $a5
+; LA64-UAL-NEXT:    xor $a4, $a6, $a7
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    or $a1, $a2, $a3
+; LA64-UAL-NEXT:    or $a0, $a4, $a0
+; LA64-UAL-NEXT:    or $a0, $a1, $a0
+; LA64-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: bcmp_size_32:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 32
+; LA32-NUAL-NEXT:    bl bcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_size_32:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 32
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 32)
   ret i32 %bcmp
@@ -360,16 +700,52 @@ define i32 @bcmp_size_63(ptr %s1, ptr %s2) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_size_63:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 63
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_size_63:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 0
+; LA64-UAL-NEXT:    ld.d $a4, $a0, 8
+; LA64-UAL-NEXT:    ld.d $a5, $a1, 8
+; LA64-UAL-NEXT:    ld.d $a6, $a0, 16
+; LA64-UAL-NEXT:    ld.d $a7, $a1, 16
+; LA64-UAL-NEXT:    ld.d $t0, $a0, 24
+; LA64-UAL-NEXT:    ld.d $t1, $a1, 24
+; LA64-UAL-NEXT:    xor $a2, $a2, $a3
+; LA64-UAL-NEXT:    xor $a3, $a4, $a5
+; LA64-UAL-NEXT:    xor $a4, $a6, $a7
+; LA64-UAL-NEXT:    xor $a5, $t0, $t1
+; LA64-UAL-NEXT:    ld.d $a6, $a0, 32
+; LA64-UAL-NEXT:    ld.d $a7, $a1, 32
+; LA64-UAL-NEXT:    ld.d $t0, $a0, 40
+; LA64-UAL-NEXT:    ld.d $t1, $a1, 40
+; LA64-UAL-NEXT:    ld.d $t2, $a0, 48
+; LA64-UAL-NEXT:    ld.d $t3, $a1, 48
+; LA64-UAL-NEXT:    ld.d $a0, $a0, 55
+; LA64-UAL-NEXT:    ld.d $a1, $a1, 55
+; LA64-UAL-NEXT:    xor $a6, $a6, $a7
+; LA64-UAL-NEXT:    xor $a7, $t0, $t1
+; LA64-UAL-NEXT:    xor $t0, $t2, $t3
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    or $a1, $a2, $a3
+; LA64-UAL-NEXT:    or $a2, $a4, $a5
+; LA64-UAL-NEXT:    or $a3, $a6, $a7
+; LA64-UAL-NEXT:    or $a0, $t0, $a0
+; LA64-UAL-NEXT:    or $a1, $a1, $a2
+; LA64-UAL-NEXT:    or $a0, $a3, $a0
+; LA64-UAL-NEXT:    or $a0, $a1, $a0
+; LA64-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA64-UAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_size_63:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 63
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 63)
   ret i32 %bcmp
@@ -386,16 +762,52 @@ define i32 @bcmp_size_64(ptr %s1, ptr %s2) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_size_64:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 64
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_size_64:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 0
+; LA64-UAL-NEXT:    ld.d $a4, $a0, 8
+; LA64-UAL-NEXT:    ld.d $a5, $a1, 8
+; LA64-UAL-NEXT:    ld.d $a6, $a0, 16
+; LA64-UAL-NEXT:    ld.d $a7, $a1, 16
+; LA64-UAL-NEXT:    ld.d $t0, $a0, 24
+; LA64-UAL-NEXT:    ld.d $t1, $a1, 24
+; LA64-UAL-NEXT:    xor $a2, $a2, $a3
+; LA64-UAL-NEXT:    xor $a3, $a4, $a5
+; LA64-UAL-NEXT:    xor $a4, $a6, $a7
+; LA64-UAL-NEXT:    xor $a5, $t0, $t1
+; LA64-UAL-NEXT:    ld.d $a6, $a0, 32
+; LA64-UAL-NEXT:    ld.d $a7, $a1, 32
+; LA64-UAL-NEXT:    ld.d $t0, $a0, 40
+; LA64-UAL-NEXT:    ld.d $t1, $a1, 40
+; LA64-UAL-NEXT:    ld.d $t2, $a0, 48
+; LA64-UAL-NEXT:    ld.d $t3, $a1, 48
+; LA64-UAL-NEXT:    ld.d $a0, $a0, 56
+; LA64-UAL-NEXT:    ld.d $a1, $a1, 56
+; LA64-UAL-NEXT:    xor $a6, $a6, $a7
+; LA64-UAL-NEXT:    xor $a7, $t0, $t1
+; LA64-UAL-NEXT:    xor $t0, $t2, $t3
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    or $a1, $a2, $a3
+; LA64-UAL-NEXT:    or $a2, $a4, $a5
+; LA64-UAL-NEXT:    or $a3, $a6, $a7
+; LA64-UAL-NEXT:    or $a0, $t0, $a0
+; LA64-UAL-NEXT:    or $a1, $a1, $a2
+; LA64-UAL-NEXT:    or $a0, $a3, $a0
+; LA64-UAL-NEXT:    or $a0, $a1, $a0
+; LA64-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA64-UAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_size_64:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 64
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 64)
   ret i32 %bcmp
@@ -478,28 +890,60 @@ entry:
 }
 
 define i1 @bcmp_eq_zero(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: bcmp_eq_zero:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 16
-; LA32-NEXT:    bl bcmp
-; LA32-NEXT:    sltui $a0, $a0, 1
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: bcmp_eq_zero:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a2, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a3, $a1, 0
+; LA32-UAL-NEXT:    ld.w $a4, $a0, 4
+; LA32-UAL-NEXT:    ld.w $a5, $a1, 4
+; LA32-UAL-NEXT:    ld.w $a6, $a0, 8
+; LA32-UAL-NEXT:    ld.w $a7, $a1, 8
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 12
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 12
+; LA32-UAL-NEXT:    xor $a2, $a2, $a3
+; LA32-UAL-NEXT:    xor $a3, $a4, $a5
+; LA32-UAL-NEXT:    xor $a4, $a6, $a7
+; LA32-UAL-NEXT:    xor $a0, $a0, $a1
+; LA32-UAL-NEXT:    or $a1, $a2, $a3
+; LA32-UAL-NEXT:    or $a0, $a4, $a0
+; LA32-UAL-NEXT:    or $a0, $a1, $a0
+; LA32-UAL-NEXT:    sltui $a0, $a0, 1
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_eq_zero:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 16
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    sltui $a0, $a0, 1
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_eq_zero:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 0
+; LA64-UAL-NEXT:    ld.d $a0, $a0, 8
+; LA64-UAL-NEXT:    ld.d $a1, $a1, 8
+; LA64-UAL-NEXT:    xor $a2, $a2, $a3
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    or $a0, $a2, $a0
+; LA64-UAL-NEXT:    sltui $a0, $a0, 1
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: bcmp_eq_zero:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 16
+; LA32-NUAL-NEXT:    bl bcmp
+; LA32-NUAL-NEXT:    sltui $a0, $a0, 1
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_eq_zero:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 16
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    sltui $a0, $a0, 1
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 16)
   %ret = icmp eq i32 %bcmp, 0
@@ -507,28 +951,38 @@ entry:
 }
 
 define i1 @bcmp_lt_zero(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: bcmp_lt_zero:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 4
-; LA32-NEXT:    bl bcmp
-; LA32-NEXT:    srli.w $a0, $a0, 31
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: bcmp_lt_zero:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    move $a0, $zero
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_lt_zero:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 4
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    slti $a0, $a0, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_lt_zero:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    move $a0, $zero
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: bcmp_lt_zero:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 4
+; LA32-NUAL-NEXT:    bl bcmp
+; LA32-NUAL-NEXT:    srli.w $a0, $a0, 31
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_lt_zero:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 4
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    slti $a0, $a0, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 4)
   %ret = icmp slt i32 %bcmp, 0
@@ -536,28 +990,44 @@ entry:
 }
 
 define i1 @bcmp_gt_zero(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: bcmp_gt_zero:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 4
-; LA32-NEXT:    bl bcmp
-; LA32-NEXT:    slt $a0, $zero, $a0
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: bcmp_gt_zero:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 0
+; LA32-UAL-NEXT:    xor $a0, $a0, $a1
+; LA32-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_gt_zero:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 4
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    slt $a0, $zero, $a0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_gt_zero:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA64-UAL-NEXT:    ld.w $a1, $a1, 0
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: bcmp_gt_zero:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 4
+; LA32-NUAL-NEXT:    bl bcmp
+; LA32-NUAL-NEXT:    slt $a0, $zero, $a0
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_gt_zero:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 4
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    slt $a0, $zero, $a0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 4)
   %ret = icmp sgt i32 %bcmp, 0
@@ -565,28 +1035,46 @@ entry:
 }
 
 define i1 @bcmp_le_zero(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: bcmp_le_zero:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 4
-; LA32-NEXT:    bl bcmp
-; LA32-NEXT:    slti $a0, $a0, 1
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: bcmp_le_zero:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 0
+; LA32-UAL-NEXT:    xor $a0, $a0, $a1
+; LA32-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA32-UAL-NEXT:    slti $a0, $a0, 1
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_le_zero:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 4
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    slti $a0, $a0, 1
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_le_zero:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA64-UAL-NEXT:    ld.w $a1, $a1, 0
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    sltu $a0, $zero, $a0
+; LA64-UAL-NEXT:    slti $a0, $a0, 1
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: bcmp_le_zero:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 4
+; LA32-NUAL-NEXT:    bl bcmp
+; LA32-NUAL-NEXT:    slti $a0, $a0, 1
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_le_zero:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 4
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    slti $a0, $a0, 1
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 4)
   %ret = icmp slt i32 %bcmp, 1
@@ -594,30 +1082,40 @@ entry:
 }
 
 define i1 @bcmp_ge_zero(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: bcmp_ge_zero:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 4
-; LA32-NEXT:    bl bcmp
-; LA32-NEXT:    addi.w $a1, $zero, -1
-; LA32-NEXT:    slt $a0, $a1, $a0
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: bcmp_ge_zero:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ori $a0, $zero, 1
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: bcmp_ge_zero:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 4
-; LA64-NEXT:    pcaddu18i $ra, %call36(bcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    addi.w $a1, $zero, -1
-; LA64-NEXT:    slt $a0, $a1, $a0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: bcmp_ge_zero:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ori $a0, $zero, 1
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: bcmp_ge_zero:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 4
+; LA32-NUAL-NEXT:    bl bcmp
+; LA32-NUAL-NEXT:    addi.w $a1, $zero, -1
+; LA32-NUAL-NEXT:    slt $a0, $a1, $a0
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: bcmp_ge_zero:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 4
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(bcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    addi.w $a1, $zero, -1
+; LA64-NUAL-NEXT:    slt $a0, $a1, $a0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iGRLen 4)
   %ret = icmp sgt i32 %bcmp, -1
@@ -635,312 +1133,1412 @@ entry:
 }
 
 define i32 @memcmp_size_1(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: memcmp_size_1:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 1
-; LA32-NEXT:    bl memcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: memcmp_size_1:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.bu $a0, $a0, 0
+; LA32-UAL-NEXT:    ld.bu $a1, $a1, 0
+; LA32-UAL-NEXT:    sub.w $a0, $a0, $a1
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_size_1:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 1
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_size_1:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.bu $a0, $a0, 0
+; LA64-UAL-NEXT:    ld.bu $a1, $a1, 0
+; LA64-UAL-NEXT:    sub.d $a0, $a0, $a1
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: memcmp_size_1:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 1
+; LA32-NUAL-NEXT:    bl memcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_size_1:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 1
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 1)
   ret i32 %memcmp
 }
 
 define i32 @memcmp_size_2(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: memcmp_size_2:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 2
-; LA32-NEXT:    bl memcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: memcmp_size_2:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.hu $a0, $a0, 0
+; LA32-UAL-NEXT:    ld.hu $a1, $a1, 0
+; LA32-UAL-NEXT:    srli.w $a2, $a0, 8
+; LA32-UAL-NEXT:    slli.w $a0, $a0, 8
+; LA32-UAL-NEXT:    or $a0, $a0, $a2
+; LA32-UAL-NEXT:    srli.w $a2, $a1, 8
+; LA32-UAL-NEXT:    slli.w $a1, $a1, 8
+; LA32-UAL-NEXT:    or $a1, $a1, $a2
+; LA32-UAL-NEXT:    lu12i.w $a2, 15
+; LA32-UAL-NEXT:    ori $a2, $a2, 4095
+; LA32-UAL-NEXT:    and $a0, $a0, $a2
+; LA32-UAL-NEXT:    and $a1, $a1, $a2
+; LA32-UAL-NEXT:    sub.w $a0, $a0, $a1
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_size_2:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 2
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_size_2:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.h $a0, $a0, 0
+; LA64-UAL-NEXT:    ld.h $a1, $a1, 0
+; LA64-UAL-NEXT:    revb.2h $a0, $a0
+; LA64-UAL-NEXT:    revb.2h $a1, $a1
+; LA64-UAL-NEXT:    bstrpick.d $a0, $a0, 15, 0
+; LA64-UAL-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-UAL-NEXT:    sub.d $a0, $a0, $a1
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: memcmp_size_2:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 2
+; LA32-NUAL-NEXT:    bl memcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_size_2:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 2
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 2)
   ret i32 %memcmp
 }
 
 define i32 @memcmp_size_3(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: memcmp_size_3:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 3
-; LA32-NEXT:    bl memcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: memcmp_size_3:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.hu $a2, $a0, 0
+; LA32-UAL-NEXT:    ld.hu $a3, $a1, 0
+; LA32-UAL-NEXT:    srli.w $a4, $a2, 8
+; LA32-UAL-NEXT:    slli.w $a2, $a2, 8
+; LA32-UAL-NEXT:    or $a2, $a2, $a4
+; LA32-UAL-NEXT:    lu12i.w $a4, 15
+; LA32-UAL-NEXT:    ori $a4, $a4, 4095
+; LA32-UAL-NEXT:    and $a2, $a2, $a4
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 8
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    and $a3, $a3, $a4
+; LA32-UAL-NEXT:    bne $a2, $a3, .LBB26_2
+; LA32-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA32-UAL-NEXT:    ld.bu $a0, $a0, 2
+; LA32-UAL-NEXT:    ld.bu $a1, $a1, 2
+; LA32-UAL-NEXT:    sub.w $a0, $a0, $a1
+; LA32-UAL-NEXT:    ret
+; LA32-UAL-NEXT:  .LBB26_2: # %res_block
+; LA32-UAL-NEXT:    sltu $a0, $a2, $a3
+; LA32-UAL-NEXT:    sub.w $a0, $zero, $a0
+; LA32-UAL-NEXT:    ori $a0, $a0, 1
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_size_3:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 3
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_size_3:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.h $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.h $a3, $a1, 0
+; LA64-UAL-NEXT:    revb.2h $a2, $a2
+; LA64-UAL-NEXT:    bstrpick.d $a2, $a2, 15, 0
+; LA64-UAL-NEXT:    revb.2h $a3, $a3
+; LA64-UAL-NEXT:    bstrpick.d $a3, $a3, 15, 0
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB26_2
+; LA64-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA64-UAL-NEXT:    ld.bu $a0, $a0, 2
+; LA64-UAL-NEXT:    ld.bu $a1, $a1, 2
+; LA64-UAL-NEXT:    sub.d $a0, $a0, $a1
+; LA64-UAL-NEXT:    ret
+; LA64-UAL-NEXT:  .LBB26_2: # %res_block
+; LA64-UAL-NEXT:    sltu $a0, $a2, $a3
+; LA64-UAL-NEXT:    sub.d $a0, $zero, $a0
+; LA64-UAL-NEXT:    ori $a0, $a0, 1
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: memcmp_size_3:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 3
+; LA32-NUAL-NEXT:    bl memcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_size_3:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 3
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 3)
   ret i32 %memcmp
 }
 
 define i32 @memcmp_size_4(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: memcmp_size_4:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 4
-; LA32-NEXT:    bl memcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: memcmp_size_4:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 0
+; LA32-UAL-NEXT:    srli.w $a2, $a0, 8
+; LA32-UAL-NEXT:    lu12i.w $a3, 15
+; LA32-UAL-NEXT:    ori $a3, $a3, 3840
+; LA32-UAL-NEXT:    and $a2, $a2, $a3
+; LA32-UAL-NEXT:    srli.w $a4, $a0, 24
+; LA32-UAL-NEXT:    or $a2, $a2, $a4
+; LA32-UAL-NEXT:    and $a4, $a0, $a3
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 8
+; LA32-UAL-NEXT:    slli.w $a0, $a0, 24
+; LA32-UAL-NEXT:    or $a0, $a0, $a4
+; LA32-UAL-NEXT:    or $a0, $a0, $a2
+; LA32-UAL-NEXT:    srli.w $a2, $a1, 8
+; LA32-UAL-NEXT:    and $a2, $a2, $a3
+; LA32-UAL-NEXT:    srli.w $a4, $a1, 24
+; LA32-UAL-NEXT:    or $a2, $a2, $a4
+; LA32-UAL-NEXT:    and $a3, $a1, $a3
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 8
+; LA32-UAL-NEXT:    slli.w $a1, $a1, 24
+; LA32-UAL-NEXT:    or $a1, $a1, $a3
+; LA32-UAL-NEXT:    or $a1, $a1, $a2
+; LA32-UAL-NEXT:    sltu $a2, $a0, $a1
+; LA32-UAL-NEXT:    sltu $a0, $a1, $a0
+; LA32-UAL-NEXT:    sub.w $a0, $a0, $a2
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_size_4:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 4
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_size_4:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA64-UAL-NEXT:    ld.w $a1, $a1, 0
+; LA64-UAL-NEXT:    revb.2w $a0, $a0
+; LA64-UAL-NEXT:    addi.w $a0, $a0, 0
+; LA64-UAL-NEXT:    revb.2w $a1, $a1
+; LA64-UAL-NEXT:    addi.w $a1, $a1, 0
+; LA64-UAL-NEXT:    sltu $a2, $a0, $a1
+; LA64-UAL-NEXT:    sltu $a0, $a1, $a0
+; LA64-UAL-NEXT:    sub.d $a0, $a0, $a2
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: memcmp_size_4:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 4
+; LA32-NUAL-NEXT:    bl memcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_size_4:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 4
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 4)
   ret i32 %memcmp
 }
 
 define i32 @memcmp_size_5(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: memcmp_size_5:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 5
-; LA32-NEXT:    bl memcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: memcmp_size_5:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a2, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a3, $a1, 0
+; LA32-UAL-NEXT:    srli.w $a4, $a2, 8
+; LA32-UAL-NEXT:    lu12i.w $a5, 15
+; LA32-UAL-NEXT:    ori $a5, $a5, 3840
+; LA32-UAL-NEXT:    and $a4, $a4, $a5
+; LA32-UAL-NEXT:    srli.w $a6, $a2, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    and $a6, $a2, $a5
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a2, $a2, 24
+; LA32-UAL-NEXT:    or $a2, $a2, $a6
+; LA32-UAL-NEXT:    or $a2, $a2, $a4
+; LA32-UAL-NEXT:    srli.w $a4, $a3, 8
+; LA32-UAL-NEXT:    and $a4, $a4, $a5
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    and $a5, $a3, $a5
+; LA32-UAL-NEXT:    slli.w $a5, $a5, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    or $a3, $a3, $a4
+; LA32-UAL-NEXT:    bne $a2, $a3, .LBB28_2
+; LA32-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA32-UAL-NEXT:    ld.bu $a0, $a0, 4
+; LA32-UAL-NEXT:    ld.bu $a1, $a1, 4
+; LA32-UAL-NEXT:    sub.w $a0, $a0, $a1
+; LA32-UAL-NEXT:    ret
+; LA32-UAL-NEXT:  .LBB28_2: # %res_block
+; LA32-UAL-NEXT:    sltu $a0, $a2, $a3
+; LA32-UAL-NEXT:    sub.w $a0, $zero, $a0
+; LA32-UAL-NEXT:    ori $a0, $a0, 1
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_size_5:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 5
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_size_5:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.w $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.w $a3, $a1, 0
+; LA64-UAL-NEXT:    revb.2w $a2, $a2
+; LA64-UAL-NEXT:    addi.w $a2, $a2, 0
+; LA64-UAL-NEXT:    revb.2w $a3, $a3
+; LA64-UAL-NEXT:    addi.w $a3, $a3, 0
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB28_2
+; LA64-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA64-UAL-NEXT:    ld.bu $a0, $a0, 4
+; LA64-UAL-NEXT:    ld.bu $a1, $a1, 4
+; LA64-UAL-NEXT:    sub.d $a0, $a0, $a1
+; LA64-UAL-NEXT:    ret
+; LA64-UAL-NEXT:  .LBB28_2: # %res_block
+; LA64-UAL-NEXT:    sltu $a0, $a2, $a3
+; LA64-UAL-NEXT:    sub.d $a0, $zero, $a0
+; LA64-UAL-NEXT:    ori $a0, $a0, 1
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: memcmp_size_5:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 5
+; LA32-NUAL-NEXT:    bl memcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_size_5:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 5
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 5)
   ret i32 %memcmp
 }
 
 define i32 @memcmp_size_6(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: memcmp_size_6:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 6
-; LA32-NEXT:    bl memcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: memcmp_size_6:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 0
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    lu12i.w $a2, 15
+; LA32-UAL-NEXT:    ori $a6, $a2, 3840
+; LA32-UAL-NEXT:    and $a5, $a5, $a6
+; LA32-UAL-NEXT:    srli.w $a7, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a7
+; LA32-UAL-NEXT:    and $a7, $a3, $a6
+; LA32-UAL-NEXT:    slli.w $a7, $a7, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a7
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a6
+; LA32-UAL-NEXT:    srli.w $a7, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a7
+; LA32-UAL-NEXT:    and $a6, $a4, $a6
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB29_3
+; LA32-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA32-UAL-NEXT:    ld.hu $a0, $a0, 4
+; LA32-UAL-NEXT:    ld.hu $a1, $a1, 4
+; LA32-UAL-NEXT:    srli.w $a3, $a0, 8
+; LA32-UAL-NEXT:    slli.w $a0, $a0, 8
+; LA32-UAL-NEXT:    or $a0, $a0, $a3
+; LA32-UAL-NEXT:    srli.w $a3, $a1, 8
+; LA32-UAL-NEXT:    slli.w $a1, $a1, 8
+; LA32-UAL-NEXT:    or $a1, $a1, $a3
+; LA32-UAL-NEXT:    ori $a2, $a2, 4095
+; LA32-UAL-NEXT:    and $a3, $a0, $a2
+; LA32-UAL-NEXT:    and $a4, $a1, $a2
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB29_3
+; LA32-UAL-NEXT:  # %bb.2:
+; LA32-UAL-NEXT:    move $a0, $zero
+; LA32-UAL-NEXT:    ret
+; LA32-UAL-NEXT:  .LBB29_3: # %res_block
+; LA32-UAL-NEXT:    sltu $a0, $a3, $a4
+; LA32-UAL-NEXT:    sub.w $a0, $zero, $a0
+; LA32-UAL-NEXT:    ori $a0, $a0, 1
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_size_6:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 6
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_size_6:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.w $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.w $a3, $a1, 0
+; LA64-UAL-NEXT:    revb.2w $a2, $a2
+; LA64-UAL-NEXT:    addi.w $a4, $a2, 0
+; LA64-UAL-NEXT:    revb.2w $a3, $a3
+; LA64-UAL-NEXT:    addi.w $a5, $a3, 0
+; LA64-UAL-NEXT:    bne $a4, $a5, .LBB29_3
+; LA64-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA64-UAL-NEXT:    ld.h $a0, $a0, 4
+; LA64-UAL-NEXT:    ld.h $a1, $a1, 4
+; LA64-UAL-NEXT:    revb.2h $a0, $a0
+; LA64-UAL-NEXT:    revb.2h $a1, $a1
+; LA64-UAL-NEXT:    bstrpick.d $a2, $a0, 15, 0
+; LA64-UAL-NEXT:    bstrpick.d $a3, $a1, 15, 0
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB29_3
+; LA64-UAL-NEXT:  # %bb.2:
+; LA64-UAL-NEXT:    move $a0, $zero
+; LA64-UAL-NEXT:    ret
+; LA64-UAL-NEXT:  .LBB29_3: # %res_block
+; LA64-UAL-NEXT:    addi.w $a0, $a3, 0
+; LA64-UAL-NEXT:    addi.w $a1, $a2, 0
+; LA64-UAL-NEXT:    sltu $a0, $a1, $a0
+; LA64-UAL-NEXT:    sub.d $a0, $zero, $a0
+; LA64-UAL-NEXT:    ori $a0, $a0, 1
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: memcmp_size_6:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 6
+; LA32-NUAL-NEXT:    bl memcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_size_6:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 6
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 6)
   ret i32 %memcmp
 }
 
 define i32 @memcmp_size_7(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: memcmp_size_7:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 7
-; LA32-NEXT:    bl memcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: memcmp_size_7:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 0
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    lu12i.w $a2, 15
+; LA32-UAL-NEXT:    ori $a2, $a2, 3840
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a3, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a6
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a4, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB30_3
+; LA32-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 3
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 3
+; LA32-UAL-NEXT:    srli.w $a3, $a0, 8
+; LA32-UAL-NEXT:    and $a3, $a3, $a2
+; LA32-UAL-NEXT:    srli.w $a4, $a0, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a4
+; LA32-UAL-NEXT:    and $a4, $a0, $a2
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 8
+; LA32-UAL-NEXT:    slli.w $a0, $a0, 24
+; LA32-UAL-NEXT:    or $a0, $a0, $a4
+; LA32-UAL-NEXT:    or $a3, $a0, $a3
+; LA32-UAL-NEXT:    srli.w $a0, $a1, 8
+; LA32-UAL-NEXT:    and $a0, $a0, $a2
+; LA32-UAL-NEXT:    srli.w $a4, $a1, 24
+; LA32-UAL-NEXT:    or $a0, $a0, $a4
+; LA32-UAL-NEXT:    and $a2, $a1, $a2
+; LA32-UAL-NEXT:    slli.w $a2, $a2, 8
+; LA32-UAL-NEXT:    slli.w $a1, $a1, 24
+; LA32-UAL-NEXT:    or $a1, $a1, $a2
+; LA32-UAL-NEXT:    or $a4, $a1, $a0
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB30_3
+; LA32-UAL-NEXT:  # %bb.2:
+; LA32-UAL-NEXT:    move $a0, $zero
+; LA32-UAL-NEXT:    ret
+; LA32-UAL-NEXT:  .LBB30_3: # %res_block
+; LA32-UAL-NEXT:    sltu $a0, $a3, $a4
+; LA32-UAL-NEXT:    sub.w $a0, $zero, $a0
+; LA32-UAL-NEXT:    ori $a0, $a0, 1
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_size_7:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 7
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_size_7:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.w $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.w $a3, $a1, 0
+; LA64-UAL-NEXT:    revb.2w $a2, $a2
+; LA64-UAL-NEXT:    addi.w $a4, $a2, 0
+; LA64-UAL-NEXT:    revb.2w $a3, $a3
+; LA64-UAL-NEXT:    addi.w $a5, $a3, 0
+; LA64-UAL-NEXT:    bne $a4, $a5, .LBB30_3
+; LA64-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA64-UAL-NEXT:    ld.w $a0, $a0, 3
+; LA64-UAL-NEXT:    ld.w $a1, $a1, 3
+; LA64-UAL-NEXT:    revb.2w $a2, $a0
+; LA64-UAL-NEXT:    addi.w $a0, $a2, 0
+; LA64-UAL-NEXT:    revb.2w $a3, $a1
+; LA64-UAL-NEXT:    addi.w $a1, $a3, 0
+; LA64-UAL-NEXT:    bne $a0, $a1, .LBB30_3
+; LA64-UAL-NEXT:  # %bb.2:
+; LA64-UAL-NEXT:    move $a0, $zero
+; LA64-UAL-NEXT:    ret
+; LA64-UAL-NEXT:  .LBB30_3: # %res_block
+; LA64-UAL-NEXT:    addi.w $a0, $a3, 0
+; LA64-UAL-NEXT:    addi.w $a1, $a2, 0
+; LA64-UAL-NEXT:    sltu $a0, $a1, $a0
+; LA64-UAL-NEXT:    sub.d $a0, $zero, $a0
+; LA64-UAL-NEXT:    ori $a0, $a0, 1
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: memcmp_size_7:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 7
+; LA32-NUAL-NEXT:    bl memcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_size_7:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 7
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 7)
   ret i32 %memcmp
 }
 
 define i32 @memcmp_size_8(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: memcmp_size_8:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 8
-; LA32-NEXT:    bl memcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: memcmp_size_8:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 0
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    lu12i.w $a2, 15
+; LA32-UAL-NEXT:    ori $a2, $a2, 3840
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a3, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a6
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a4, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB31_3
+; LA32-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 4
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 4
+; LA32-UAL-NEXT:    srli.w $a3, $a0, 8
+; LA32-UAL-NEXT:    and $a3, $a3, $a2
+; LA32-UAL-NEXT:    srli.w $a4, $a0, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a4
+; LA32-UAL-NEXT:    and $a4, $a0, $a2
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 8
+; LA32-UAL-NEXT:    slli.w $a0, $a0, 24
+; LA32-UAL-NEXT:    or $a0, $a0, $a4
+; LA32-UAL-NEXT:    or $a3, $a0, $a3
+; LA32-UAL-NEXT:    srli.w $a0, $a1, 8
+; LA32-UAL-NEXT:    and $a0, $a0, $a2
+; LA32-UAL-NEXT:    srli.w $a4, $a1, 24
+; LA32-UAL-NEXT:    or $a0, $a0, $a4
+; LA32-UAL-NEXT:    and $a2, $a1, $a2
+; LA32-UAL-NEXT:    slli.w $a2, $a2, 8
+; LA32-UAL-NEXT:    slli.w $a1, $a1, 24
+; LA32-UAL-NEXT:    or $a1, $a1, $a2
+; LA32-UAL-NEXT:    or $a4, $a1, $a0
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB31_3
+; LA32-UAL-NEXT:  # %bb.2:
+; LA32-UAL-NEXT:    move $a0, $zero
+; LA32-UAL-NEXT:    ret
+; LA32-UAL-NEXT:  .LBB31_3: # %res_block
+; LA32-UAL-NEXT:    sltu $a0, $a3, $a4
+; LA32-UAL-NEXT:    sub.w $a0, $zero, $a0
+; LA32-UAL-NEXT:    ori $a0, $a0, 1
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_size_8:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 8
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_size_8:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.d $a0, $a0, 0
+; LA64-UAL-NEXT:    ld.d $a1, $a1, 0
+; LA64-UAL-NEXT:    revb.d $a0, $a0
+; LA64-UAL-NEXT:    revb.d $a1, $a1
+; LA64-UAL-NEXT:    sltu $a2, $a0, $a1
+; LA64-UAL-NEXT:    sltu $a0, $a1, $a0
+; LA64-UAL-NEXT:    sub.d $a0, $a0, $a2
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: memcmp_size_8:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 8
+; LA32-NUAL-NEXT:    bl memcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_size_8:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 8
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 8)
   ret i32 %memcmp
 }
 
 define i32 @memcmp_size_15(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: memcmp_size_15:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 15
-; LA32-NEXT:    bl memcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: memcmp_size_15:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 0
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    lu12i.w $a2, 15
+; LA32-UAL-NEXT:    ori $a2, $a2, 3840
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a3, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a6
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a4, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB32_5
+; LA32-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 4
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 4
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a3, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a6
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a4, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB32_5
+; LA32-UAL-NEXT:  # %bb.2: # %loadbb2
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 8
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 8
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a3, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a6
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a4, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB32_5
+; LA32-UAL-NEXT:  # %bb.3: # %loadbb3
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 11
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 11
+; LA32-UAL-NEXT:    srli.w $a3, $a0, 8
+; LA32-UAL-NEXT:    and $a3, $a3, $a2
+; LA32-UAL-NEXT:    srli.w $a4, $a0, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a4
+; LA32-UAL-NEXT:    and $a4, $a0, $a2
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 8
+; LA32-UAL-NEXT:    slli.w $a0, $a0, 24
+; LA32-UAL-NEXT:    or $a0, $a0, $a4
+; LA32-UAL-NEXT:    or $a3, $a0, $a3
+; LA32-UAL-NEXT:    srli.w $a0, $a1, 8
+; LA32-UAL-NEXT:    and $a0, $a0, $a2
+; LA32-UAL-NEXT:    srli.w $a4, $a1, 24
+; LA32-UAL-NEXT:    or $a0, $a0, $a4
+; LA32-UAL-NEXT:    and $a2, $a1, $a2
+; LA32-UAL-NEXT:    slli.w $a2, $a2, 8
+; LA32-UAL-NEXT:    slli.w $a1, $a1, 24
+; LA32-UAL-NEXT:    or $a1, $a1, $a2
+; LA32-UAL-NEXT:    or $a4, $a1, $a0
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB32_5
+; LA32-UAL-NEXT:  # %bb.4:
+; LA32-UAL-NEXT:    move $a0, $zero
+; LA32-UAL-NEXT:    ret
+; LA32-UAL-NEXT:  .LBB32_5: # %res_block
+; LA32-UAL-NEXT:    sltu $a0, $a3, $a4
+; LA32-UAL-NEXT:    sub.w $a0, $zero, $a0
+; LA32-UAL-NEXT:    ori $a0, $a0, 1
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_size_15:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 15
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_size_15:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 0
+; LA64-UAL-NEXT:    revb.d $a2, $a2
+; LA64-UAL-NEXT:    revb.d $a3, $a3
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB32_3
+; LA64-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA64-UAL-NEXT:    ld.d $a0, $a0, 7
+; LA64-UAL-NEXT:    ld.d $a1, $a1, 7
+; LA64-UAL-NEXT:    revb.d $a2, $a0
+; LA64-UAL-NEXT:    revb.d $a3, $a1
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB32_3
+; LA64-UAL-NEXT:  # %bb.2:
+; LA64-UAL-NEXT:    move $a0, $zero
+; LA64-UAL-NEXT:    ret
+; LA64-UAL-NEXT:  .LBB32_3: # %res_block
+; LA64-UAL-NEXT:    sltu $a0, $a2, $a3
+; LA64-UAL-NEXT:    sub.d $a0, $zero, $a0
+; LA64-UAL-NEXT:    ori $a0, $a0, 1
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: memcmp_size_15:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 15
+; LA32-NUAL-NEXT:    bl memcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_size_15:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 15
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 15)
   ret i32 %memcmp
 }
 
 define i32 @memcmp_size_16(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: memcmp_size_16:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 16
-; LA32-NEXT:    bl memcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: memcmp_size_16:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 0
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    lu12i.w $a2, 15
+; LA32-UAL-NEXT:    ori $a2, $a2, 3840
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a3, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a6
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a4, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB33_5
+; LA32-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 4
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 4
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a3, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a6
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a4, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB33_5
+; LA32-UAL-NEXT:  # %bb.2: # %loadbb2
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 8
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 8
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a3, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a6
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a4, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB33_5
+; LA32-UAL-NEXT:  # %bb.3: # %loadbb3
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 12
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 12
+; LA32-UAL-NEXT:    srli.w $a3, $a0, 8
+; LA32-UAL-NEXT:    and $a3, $a3, $a2
+; LA32-UAL-NEXT:    srli.w $a4, $a0, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a4
+; LA32-UAL-NEXT:    and $a4, $a0, $a2
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 8
+; LA32-UAL-NEXT:    slli.w $a0, $a0, 24
+; LA32-UAL-NEXT:    or $a0, $a0, $a4
+; LA32-UAL-NEXT:    or $a3, $a0, $a3
+; LA32-UAL-NEXT:    srli.w $a0, $a1, 8
+; LA32-UAL-NEXT:    and $a0, $a0, $a2
+; LA32-UAL-NEXT:    srli.w $a4, $a1, 24
+; LA32-UAL-NEXT:    or $a0, $a0, $a4
+; LA32-UAL-NEXT:    and $a2, $a1, $a2
+; LA32-UAL-NEXT:    slli.w $a2, $a2, 8
+; LA32-UAL-NEXT:    slli.w $a1, $a1, 24
+; LA32-UAL-NEXT:    or $a1, $a1, $a2
+; LA32-UAL-NEXT:    or $a4, $a1, $a0
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB33_5
+; LA32-UAL-NEXT:  # %bb.4:
+; LA32-UAL-NEXT:    move $a0, $zero
+; LA32-UAL-NEXT:    ret
+; LA32-UAL-NEXT:  .LBB33_5: # %res_block
+; LA32-UAL-NEXT:    sltu $a0, $a3, $a4
+; LA32-UAL-NEXT:    sub.w $a0, $zero, $a0
+; LA32-UAL-NEXT:    ori $a0, $a0, 1
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_size_16:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 16
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_size_16:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 0
+; LA64-UAL-NEXT:    revb.d $a2, $a2
+; LA64-UAL-NEXT:    revb.d $a3, $a3
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB33_3
+; LA64-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA64-UAL-NEXT:    ld.d $a0, $a0, 8
+; LA64-UAL-NEXT:    ld.d $a1, $a1, 8
+; LA64-UAL-NEXT:    revb.d $a2, $a0
+; LA64-UAL-NEXT:    revb.d $a3, $a1
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB33_3
+; LA64-UAL-NEXT:  # %bb.2:
+; LA64-UAL-NEXT:    move $a0, $zero
+; LA64-UAL-NEXT:    ret
+; LA64-UAL-NEXT:  .LBB33_3: # %res_block
+; LA64-UAL-NEXT:    sltu $a0, $a2, $a3
+; LA64-UAL-NEXT:    sub.d $a0, $zero, $a0
+; LA64-UAL-NEXT:    ori $a0, $a0, 1
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: memcmp_size_16:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 16
+; LA32-NUAL-NEXT:    bl memcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_size_16:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 16
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 16)
   ret i32 %memcmp
 }
 
 define i32 @memcmp_size_31(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: memcmp_size_31:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 31
-; LA32-NEXT:    bl memcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: memcmp_size_31:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 0
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    lu12i.w $a2, 15
+; LA32-UAL-NEXT:    ori $a2, $a2, 3840
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a3, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a6
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a4, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB34_9
+; LA32-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 4
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 4
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a3, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a6
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a4, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB34_9
+; LA32-UAL-NEXT:  # %bb.2: # %loadbb2
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 8
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 8
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a3, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a6
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a4, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB34_9
+; LA32-UAL-NEXT:  # %bb.3: # %loadbb3
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 12
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 12
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a3, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a6
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a4, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB34_9
+; LA32-UAL-NEXT:  # %bb.4: # %loadbb4
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 16
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 16
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a3, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a6
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a4, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB34_9
+; LA32-UAL-NEXT:  # %bb.5: # %loadbb5
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 20
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 20
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a3, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a6
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a4, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB34_9
+; LA32-UAL-NEXT:  # %bb.6: # %loadbb6
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 24
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 24
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a3, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a6
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a4, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB34_9
+; LA32-UAL-NEXT:  # %bb.7: # %loadbb7
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 27
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 27
+; LA32-UAL-NEXT:    srli.w $a3, $a0, 8
+; LA32-UAL-NEXT:    and $a3, $a3, $a2
+; LA32-UAL-NEXT:    srli.w $a4, $a0, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a4
+; LA32-UAL-NEXT:    and $a4, $a0, $a2
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 8
+; LA32-UAL-NEXT:    slli.w $a0, $a0, 24
+; LA32-UAL-NEXT:    or $a0, $a0, $a4
+; LA32-UAL-NEXT:    or $a3, $a0, $a3
+; LA32-UAL-NEXT:    srli.w $a0, $a1, 8
+; LA32-UAL-NEXT:    and $a0, $a0, $a2
+; LA32-UAL-NEXT:    srli.w $a4, $a1, 24
+; LA32-UAL-NEXT:    or $a0, $a0, $a4
+; LA32-UAL-NEXT:    and $a2, $a1, $a2
+; LA32-UAL-NEXT:    slli.w $a2, $a2, 8
+; LA32-UAL-NEXT:    slli.w $a1, $a1, 24
+; LA32-UAL-NEXT:    or $a1, $a1, $a2
+; LA32-UAL-NEXT:    or $a4, $a1, $a0
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB34_9
+; LA32-UAL-NEXT:  # %bb.8:
+; LA32-UAL-NEXT:    move $a0, $zero
+; LA32-UAL-NEXT:    ret
+; LA32-UAL-NEXT:  .LBB34_9: # %res_block
+; LA32-UAL-NEXT:    sltu $a0, $a3, $a4
+; LA32-UAL-NEXT:    sub.w $a0, $zero, $a0
+; LA32-UAL-NEXT:    ori $a0, $a0, 1
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_size_31:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 31
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_size_31:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 0
+; LA64-UAL-NEXT:    revb.d $a2, $a2
+; LA64-UAL-NEXT:    revb.d $a3, $a3
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB34_5
+; LA64-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 8
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 8
+; LA64-UAL-NEXT:    revb.d $a2, $a2
+; LA64-UAL-NEXT:    revb.d $a3, $a3
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB34_5
+; LA64-UAL-NEXT:  # %bb.2: # %loadbb2
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 16
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 16
+; LA64-UAL-NEXT:    revb.d $a2, $a2
+; LA64-UAL-NEXT:    revb.d $a3, $a3
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB34_5
+; LA64-UAL-NEXT:  # %bb.3: # %loadbb3
+; LA64-UAL-NEXT:    ld.d $a0, $a0, 23
+; LA64-UAL-NEXT:    ld.d $a1, $a1, 23
+; LA64-UAL-NEXT:    revb.d $a2, $a0
+; LA64-UAL-NEXT:    revb.d $a3, $a1
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB34_5
+; LA64-UAL-NEXT:  # %bb.4:
+; LA64-UAL-NEXT:    move $a0, $zero
+; LA64-UAL-NEXT:    ret
+; LA64-UAL-NEXT:  .LBB34_5: # %res_block
+; LA64-UAL-NEXT:    sltu $a0, $a2, $a3
+; LA64-UAL-NEXT:    sub.d $a0, $zero, $a0
+; LA64-UAL-NEXT:    ori $a0, $a0, 1
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: memcmp_size_31:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 31
+; LA32-NUAL-NEXT:    bl memcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_size_31:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 31
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 31)
   ret i32 %memcmp
 }
 
 define i32 @memcmp_size_32(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: memcmp_size_32:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 32
-; LA32-NEXT:    bl memcmp
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: memcmp_size_32:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 0
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    lu12i.w $a2, 15
+; LA32-UAL-NEXT:    ori $a2, $a2, 3840
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a3, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a6
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a4, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB35_9
+; LA32-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 4
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 4
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a3, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a6
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a4, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB35_9
+; LA32-UAL-NEXT:  # %bb.2: # %loadbb2
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 8
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 8
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a3, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a6
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a4, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB35_9
+; LA32-UAL-NEXT:  # %bb.3: # %loadbb3
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 12
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 12
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a3, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a6
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a4, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB35_9
+; LA32-UAL-NEXT:  # %bb.4: # %loadbb4
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 16
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 16
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a3, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a6
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a4, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB35_9
+; LA32-UAL-NEXT:  # %bb.5: # %loadbb5
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 20
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 20
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a3, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a6
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a4, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB35_9
+; LA32-UAL-NEXT:  # %bb.6: # %loadbb6
+; LA32-UAL-NEXT:    ld.w $a3, $a0, 24
+; LA32-UAL-NEXT:    ld.w $a4, $a1, 24
+; LA32-UAL-NEXT:    srli.w $a5, $a3, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a3, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a3, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a6
+; LA32-UAL-NEXT:    or $a3, $a3, $a5
+; LA32-UAL-NEXT:    srli.w $a5, $a4, 8
+; LA32-UAL-NEXT:    and $a5, $a5, $a2
+; LA32-UAL-NEXT:    srli.w $a6, $a4, 24
+; LA32-UAL-NEXT:    or $a5, $a5, $a6
+; LA32-UAL-NEXT:    and $a6, $a4, $a2
+; LA32-UAL-NEXT:    slli.w $a6, $a6, 8
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 24
+; LA32-UAL-NEXT:    or $a4, $a4, $a6
+; LA32-UAL-NEXT:    or $a4, $a4, $a5
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB35_9
+; LA32-UAL-NEXT:  # %bb.7: # %loadbb7
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 28
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 28
+; LA32-UAL-NEXT:    srli.w $a3, $a0, 8
+; LA32-UAL-NEXT:    and $a3, $a3, $a2
+; LA32-UAL-NEXT:    srli.w $a4, $a0, 24
+; LA32-UAL-NEXT:    or $a3, $a3, $a4
+; LA32-UAL-NEXT:    and $a4, $a0, $a2
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 8
+; LA32-UAL-NEXT:    slli.w $a0, $a0, 24
+; LA32-UAL-NEXT:    or $a0, $a0, $a4
+; LA32-UAL-NEXT:    or $a3, $a0, $a3
+; LA32-UAL-NEXT:    srli.w $a0, $a1, 8
+; LA32-UAL-NEXT:    and $a0, $a0, $a2
+; LA32-UAL-NEXT:    srli.w $a4, $a1, 24
+; LA32-UAL-NEXT:    or $a0, $a0, $a4
+; LA32-UAL-NEXT:    and $a2, $a1, $a2
+; LA32-UAL-NEXT:    slli.w $a2, $a2, 8
+; LA32-UAL-NEXT:    slli.w $a1, $a1, 24
+; LA32-UAL-NEXT:    or $a1, $a1, $a2
+; LA32-UAL-NEXT:    or $a4, $a1, $a0
+; LA32-UAL-NEXT:    bne $a3, $a4, .LBB35_9
+; LA32-UAL-NEXT:  # %bb.8:
+; LA32-UAL-NEXT:    move $a0, $zero
+; LA32-UAL-NEXT:    ret
+; LA32-UAL-NEXT:  .LBB35_9: # %res_block
+; LA32-UAL-NEXT:    sltu $a0, $a3, $a4
+; LA32-UAL-NEXT:    sub.w $a0, $zero, $a0
+; LA32-UAL-NEXT:    ori $a0, $a0, 1
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_size_32:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 32
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_size_32:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 0
+; LA64-UAL-NEXT:    revb.d $a2, $a2
+; LA64-UAL-NEXT:    revb.d $a3, $a3
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB35_5
+; LA64-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 8
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 8
+; LA64-UAL-NEXT:    revb.d $a2, $a2
+; LA64-UAL-NEXT:    revb.d $a3, $a3
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB35_5
+; LA64-UAL-NEXT:  # %bb.2: # %loadbb2
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 16
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 16
+; LA64-UAL-NEXT:    revb.d $a2, $a2
+; LA64-UAL-NEXT:    revb.d $a3, $a3
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB35_5
+; LA64-UAL-NEXT:  # %bb.3: # %loadbb3
+; LA64-UAL-NEXT:    ld.d $a0, $a0, 24
+; LA64-UAL-NEXT:    ld.d $a1, $a1, 24
+; LA64-UAL-NEXT:    revb.d $a2, $a0
+; LA64-UAL-NEXT:    revb.d $a3, $a1
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB35_5
+; LA64-UAL-NEXT:  # %bb.4:
+; LA64-UAL-NEXT:    move $a0, $zero
+; LA64-UAL-NEXT:    ret
+; LA64-UAL-NEXT:  .LBB35_5: # %res_block
+; LA64-UAL-NEXT:    sltu $a0, $a2, $a3
+; LA64-UAL-NEXT:    sub.d $a0, $zero, $a0
+; LA64-UAL-NEXT:    ori $a0, $a0, 1
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: memcmp_size_32:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 32
+; LA32-NUAL-NEXT:    bl memcmp
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_size_32:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 32
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 32)
   ret i32 %memcmp
@@ -957,16 +2555,74 @@ define i32 @memcmp_size_63(ptr %s1, ptr %s2) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_size_63:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 63
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_size_63:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 0
+; LA64-UAL-NEXT:    revb.d $a2, $a2
+; LA64-UAL-NEXT:    revb.d $a3, $a3
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB36_9
+; LA64-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 8
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 8
+; LA64-UAL-NEXT:    revb.d $a2, $a2
+; LA64-UAL-NEXT:    revb.d $a3, $a3
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB36_9
+; LA64-UAL-NEXT:  # %bb.2: # %loadbb2
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 16
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 16
+; LA64-UAL-NEXT:    revb.d $a2, $a2
+; LA64-UAL-NEXT:    revb.d $a3, $a3
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB36_9
+; LA64-UAL-NEXT:  # %bb.3: # %loadbb3
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 24
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 24
+; LA64-UAL-NEXT:    revb.d $a2, $a2
+; LA64-UAL-NEXT:    revb.d $a3, $a3
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB36_9
+; LA64-UAL-NEXT:  # %bb.4: # %loadbb4
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 32
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 32
+; LA64-UAL-NEXT:    revb.d $a2, $a2
+; LA64-UAL-NEXT:    revb.d $a3, $a3
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB36_9
+; LA64-UAL-NEXT:  # %bb.5: # %loadbb5
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 40
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 40
+; LA64-UAL-NEXT:    revb.d $a2, $a2
+; LA64-UAL-NEXT:    revb.d $a3, $a3
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB36_9
+; LA64-UAL-NEXT:  # %bb.6: # %loadbb6
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 48
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 48
+; LA64-UAL-NEXT:    revb.d $a2, $a2
+; LA64-UAL-NEXT:    revb.d $a3, $a3
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB36_9
+; LA64-UAL-NEXT:  # %bb.7: # %loadbb7
+; LA64-UAL-NEXT:    ld.d $a0, $a0, 55
+; LA64-UAL-NEXT:    ld.d $a1, $a1, 55
+; LA64-UAL-NEXT:    revb.d $a2, $a0
+; LA64-UAL-NEXT:    revb.d $a3, $a1
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB36_9
+; LA64-UAL-NEXT:  # %bb.8:
+; LA64-UAL-NEXT:    move $a0, $zero
+; LA64-UAL-NEXT:    ret
+; LA64-UAL-NEXT:  .LBB36_9: # %res_block
+; LA64-UAL-NEXT:    sltu $a0, $a2, $a3
+; LA64-UAL-NEXT:    sub.d $a0, $zero, $a0
+; LA64-UAL-NEXT:    ori $a0, $a0, 1
+; LA64-UAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_size_63:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 63
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 63)
   ret i32 %memcmp
@@ -983,16 +2639,74 @@ define i32 @memcmp_size_64(ptr %s1, ptr %s2) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_size_64:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 64
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_size_64:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 0
+; LA64-UAL-NEXT:    revb.d $a2, $a2
+; LA64-UAL-NEXT:    revb.d $a3, $a3
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB37_9
+; LA64-UAL-NEXT:  # %bb.1: # %loadbb1
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 8
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 8
+; LA64-UAL-NEXT:    revb.d $a2, $a2
+; LA64-UAL-NEXT:    revb.d $a3, $a3
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB37_9
+; LA64-UAL-NEXT:  # %bb.2: # %loadbb2
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 16
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 16
+; LA64-UAL-NEXT:    revb.d $a2, $a2
+; LA64-UAL-NEXT:    revb.d $a3, $a3
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB37_9
+; LA64-UAL-NEXT:  # %bb.3: # %loadbb3
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 24
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 24
+; LA64-UAL-NEXT:    revb.d $a2, $a2
+; LA64-UAL-NEXT:    revb.d $a3, $a3
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB37_9
+; LA64-UAL-NEXT:  # %bb.4: # %loadbb4
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 32
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 32
+; LA64-UAL-NEXT:    revb.d $a2, $a2
+; LA64-UAL-NEXT:    revb.d $a3, $a3
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB37_9
+; LA64-UAL-NEXT:  # %bb.5: # %loadbb5
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 40
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 40
+; LA64-UAL-NEXT:    revb.d $a2, $a2
+; LA64-UAL-NEXT:    revb.d $a3, $a3
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB37_9
+; LA64-UAL-NEXT:  # %bb.6: # %loadbb6
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 48
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 48
+; LA64-UAL-NEXT:    revb.d $a2, $a2
+; LA64-UAL-NEXT:    revb.d $a3, $a3
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB37_9
+; LA64-UAL-NEXT:  # %bb.7: # %loadbb7
+; LA64-UAL-NEXT:    ld.d $a0, $a0, 56
+; LA64-UAL-NEXT:    ld.d $a1, $a1, 56
+; LA64-UAL-NEXT:    revb.d $a2, $a0
+; LA64-UAL-NEXT:    revb.d $a3, $a1
+; LA64-UAL-NEXT:    bne $a2, $a3, .LBB37_9
+; LA64-UAL-NEXT:  # %bb.8:
+; LA64-UAL-NEXT:    move $a0, $zero
+; LA64-UAL-NEXT:    ret
+; LA64-UAL-NEXT:  .LBB37_9: # %res_block
+; LA64-UAL-NEXT:    sltu $a0, $a2, $a3
+; LA64-UAL-NEXT:    sub.d $a0, $zero, $a0
+; LA64-UAL-NEXT:    ori $a0, $a0, 1
+; LA64-UAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_size_64:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 64
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 64)
   ret i32 %memcmp
@@ -1075,28 +2789,60 @@ entry:
 }
 
 define i1 @memcmp_eq_zero(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: memcmp_eq_zero:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 16
-; LA32-NEXT:    bl memcmp
-; LA32-NEXT:    sltui $a0, $a0, 1
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: memcmp_eq_zero:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a2, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a3, $a1, 0
+; LA32-UAL-NEXT:    ld.w $a4, $a0, 4
+; LA32-UAL-NEXT:    ld.w $a5, $a1, 4
+; LA32-UAL-NEXT:    ld.w $a6, $a0, 8
+; LA32-UAL-NEXT:    ld.w $a7, $a1, 8
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 12
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 12
+; LA32-UAL-NEXT:    xor $a2, $a2, $a3
+; LA32-UAL-NEXT:    xor $a3, $a4, $a5
+; LA32-UAL-NEXT:    xor $a4, $a6, $a7
+; LA32-UAL-NEXT:    xor $a0, $a0, $a1
+; LA32-UAL-NEXT:    or $a1, $a2, $a3
+; LA32-UAL-NEXT:    or $a0, $a4, $a0
+; LA32-UAL-NEXT:    or $a0, $a1, $a0
+; LA32-UAL-NEXT:    sltui $a0, $a0, 1
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_eq_zero:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 16
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    sltui $a0, $a0, 1
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_eq_zero:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.d $a2, $a0, 0
+; LA64-UAL-NEXT:    ld.d $a3, $a1, 0
+; LA64-UAL-NEXT:    ld.d $a0, $a0, 8
+; LA64-UAL-NEXT:    ld.d $a1, $a1, 8
+; LA64-UAL-NEXT:    xor $a2, $a2, $a3
+; LA64-UAL-NEXT:    xor $a0, $a0, $a1
+; LA64-UAL-NEXT:    or $a0, $a2, $a0
+; LA64-UAL-NEXT:    sltui $a0, $a0, 1
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: memcmp_eq_zero:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 16
+; LA32-NUAL-NEXT:    bl memcmp
+; LA32-NUAL-NEXT:    sltui $a0, $a0, 1
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_eq_zero:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 16
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    sltui $a0, $a0, 1
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 16)
   %ret = icmp eq i32 %memcmp, 0
@@ -1104,28 +2850,66 @@ entry:
 }
 
 define i1 @memcmp_lt_zero(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: memcmp_lt_zero:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 4
-; LA32-NEXT:    bl memcmp
-; LA32-NEXT:    srli.w $a0, $a0, 31
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: memcmp_lt_zero:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 0
+; LA32-UAL-NEXT:    srli.w $a2, $a0, 8
+; LA32-UAL-NEXT:    lu12i.w $a3, 15
+; LA32-UAL-NEXT:    ori $a3, $a3, 3840
+; LA32-UAL-NEXT:    and $a2, $a2, $a3
+; LA32-UAL-NEXT:    srli.w $a4, $a0, 24
+; LA32-UAL-NEXT:    or $a2, $a2, $a4
+; LA32-UAL-NEXT:    and $a4, $a0, $a3
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 8
+; LA32-UAL-NEXT:    slli.w $a0, $a0, 24
+; LA32-UAL-NEXT:    or $a0, $a0, $a4
+; LA32-UAL-NEXT:    or $a0, $a0, $a2
+; LA32-UAL-NEXT:    srli.w $a2, $a1, 8
+; LA32-UAL-NEXT:    and $a2, $a2, $a3
+; LA32-UAL-NEXT:    srli.w $a4, $a1, 24
+; LA32-UAL-NEXT:    or $a2, $a2, $a4
+; LA32-UAL-NEXT:    and $a3, $a1, $a3
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 8
+; LA32-UAL-NEXT:    slli.w $a1, $a1, 24
+; LA32-UAL-NEXT:    or $a1, $a1, $a3
+; LA32-UAL-NEXT:    or $a1, $a1, $a2
+; LA32-UAL-NEXT:    sltu $a0, $a0, $a1
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_lt_zero:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 4
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    slti $a0, $a0, 0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_lt_zero:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA64-UAL-NEXT:    ld.w $a1, $a1, 0
+; LA64-UAL-NEXT:    revb.2w $a0, $a0
+; LA64-UAL-NEXT:    addi.w $a0, $a0, 0
+; LA64-UAL-NEXT:    revb.2w $a1, $a1
+; LA64-UAL-NEXT:    addi.w $a1, $a1, 0
+; LA64-UAL-NEXT:    sltu $a0, $a0, $a1
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: memcmp_lt_zero:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 4
+; LA32-NUAL-NEXT:    bl memcmp
+; LA32-NUAL-NEXT:    srli.w $a0, $a0, 31
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_lt_zero:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 4
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    slti $a0, $a0, 0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 4)
   %ret = icmp slt i32 %memcmp, 0
@@ -1133,28 +2917,66 @@ entry:
 }
 
 define i1 @memcmp_gt_zero(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: memcmp_gt_zero:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 4
-; LA32-NEXT:    bl memcmp
-; LA32-NEXT:    slt $a0, $zero, $a0
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: memcmp_gt_zero:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 0
+; LA32-UAL-NEXT:    srli.w $a2, $a0, 8
+; LA32-UAL-NEXT:    lu12i.w $a3, 15
+; LA32-UAL-NEXT:    ori $a3, $a3, 3840
+; LA32-UAL-NEXT:    and $a2, $a2, $a3
+; LA32-UAL-NEXT:    srli.w $a4, $a0, 24
+; LA32-UAL-NEXT:    or $a2, $a2, $a4
+; LA32-UAL-NEXT:    and $a4, $a0, $a3
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 8
+; LA32-UAL-NEXT:    slli.w $a0, $a0, 24
+; LA32-UAL-NEXT:    or $a0, $a0, $a4
+; LA32-UAL-NEXT:    or $a0, $a0, $a2
+; LA32-UAL-NEXT:    srli.w $a2, $a1, 8
+; LA32-UAL-NEXT:    and $a2, $a2, $a3
+; LA32-UAL-NEXT:    srli.w $a4, $a1, 24
+; LA32-UAL-NEXT:    or $a2, $a2, $a4
+; LA32-UAL-NEXT:    and $a3, $a1, $a3
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 8
+; LA32-UAL-NEXT:    slli.w $a1, $a1, 24
+; LA32-UAL-NEXT:    or $a1, $a1, $a3
+; LA32-UAL-NEXT:    or $a1, $a1, $a2
+; LA32-UAL-NEXT:    sltu $a0, $a1, $a0
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_gt_zero:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 4
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    slt $a0, $zero, $a0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_gt_zero:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA64-UAL-NEXT:    ld.w $a1, $a1, 0
+; LA64-UAL-NEXT:    revb.2w $a0, $a0
+; LA64-UAL-NEXT:    addi.w $a0, $a0, 0
+; LA64-UAL-NEXT:    revb.2w $a1, $a1
+; LA64-UAL-NEXT:    addi.w $a1, $a1, 0
+; LA64-UAL-NEXT:    sltu $a0, $a1, $a0
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: memcmp_gt_zero:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 4
+; LA32-NUAL-NEXT:    bl memcmp
+; LA32-NUAL-NEXT:    slt $a0, $zero, $a0
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_gt_zero:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 4
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    slt $a0, $zero, $a0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 4)
   %ret = icmp sgt i32 %memcmp, 0
@@ -1162,28 +2984,68 @@ entry:
 }
 
 define i1 @memcmp_le_zero(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: memcmp_le_zero:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 4
-; LA32-NEXT:    bl memcmp
-; LA32-NEXT:    slti $a0, $a0, 1
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: memcmp_le_zero:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 0
+; LA32-UAL-NEXT:    srli.w $a2, $a0, 8
+; LA32-UAL-NEXT:    lu12i.w $a3, 15
+; LA32-UAL-NEXT:    ori $a3, $a3, 3840
+; LA32-UAL-NEXT:    and $a2, $a2, $a3
+; LA32-UAL-NEXT:    srli.w $a4, $a0, 24
+; LA32-UAL-NEXT:    or $a2, $a2, $a4
+; LA32-UAL-NEXT:    and $a4, $a0, $a3
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 8
+; LA32-UAL-NEXT:    slli.w $a0, $a0, 24
+; LA32-UAL-NEXT:    or $a0, $a0, $a4
+; LA32-UAL-NEXT:    or $a0, $a0, $a2
+; LA32-UAL-NEXT:    srli.w $a2, $a1, 8
+; LA32-UAL-NEXT:    and $a2, $a2, $a3
+; LA32-UAL-NEXT:    srli.w $a4, $a1, 24
+; LA32-UAL-NEXT:    or $a2, $a2, $a4
+; LA32-UAL-NEXT:    and $a3, $a1, $a3
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 8
+; LA32-UAL-NEXT:    slli.w $a1, $a1, 24
+; LA32-UAL-NEXT:    or $a1, $a1, $a3
+; LA32-UAL-NEXT:    or $a1, $a1, $a2
+; LA32-UAL-NEXT:    sltu $a0, $a1, $a0
+; LA32-UAL-NEXT:    xori $a0, $a0, 1
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_le_zero:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 4
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    slti $a0, $a0, 1
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_le_zero:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA64-UAL-NEXT:    ld.w $a1, $a1, 0
+; LA64-UAL-NEXT:    revb.2w $a0, $a0
+; LA64-UAL-NEXT:    addi.w $a0, $a0, 0
+; LA64-UAL-NEXT:    revb.2w $a1, $a1
+; LA64-UAL-NEXT:    addi.w $a1, $a1, 0
+; LA64-UAL-NEXT:    sltu $a0, $a1, $a0
+; LA64-UAL-NEXT:    xori $a0, $a0, 1
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: memcmp_le_zero:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 4
+; LA32-NUAL-NEXT:    bl memcmp
+; LA32-NUAL-NEXT:    slti $a0, $a0, 1
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_le_zero:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 4
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    slti $a0, $a0, 1
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 4)
   %ret = icmp slt i32 %memcmp, 1
@@ -1191,37 +3053,72 @@ entry:
 }
 
 define i1 @memcmp_ge_zero(ptr %s1, ptr %s2) nounwind {
-; LA32-LABEL: memcmp_ge_zero:
-; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    ori $a2, $zero, 4
-; LA32-NEXT:    bl memcmp
-; LA32-NEXT:    addi.w $a1, $zero, -1
-; LA32-NEXT:    slt $a0, $a1, $a0
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    ret
+; LA32-UAL-LABEL: memcmp_ge_zero:
+; LA32-UAL:       # %bb.0: # %entry
+; LA32-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA32-UAL-NEXT:    ld.w $a1, $a1, 0
+; LA32-UAL-NEXT:    srli.w $a2, $a0, 8
+; LA32-UAL-NEXT:    lu12i.w $a3, 15
+; LA32-UAL-NEXT:    ori $a3, $a3, 3840
+; LA32-UAL-NEXT:    and $a2, $a2, $a3
+; LA32-UAL-NEXT:    srli.w $a4, $a0, 24
+; LA32-UAL-NEXT:    or $a2, $a2, $a4
+; LA32-UAL-NEXT:    and $a4, $a0, $a3
+; LA32-UAL-NEXT:    slli.w $a4, $a4, 8
+; LA32-UAL-NEXT:    slli.w $a0, $a0, 24
+; LA32-UAL-NEXT:    or $a0, $a0, $a4
+; LA32-UAL-NEXT:    or $a0, $a0, $a2
+; LA32-UAL-NEXT:    srli.w $a2, $a1, 8
+; LA32-UAL-NEXT:    and $a2, $a2, $a3
+; LA32-UAL-NEXT:    srli.w $a4, $a1, 24
+; LA32-UAL-NEXT:    or $a2, $a2, $a4
+; LA32-UAL-NEXT:    and $a3, $a1, $a3
+; LA32-UAL-NEXT:    slli.w $a3, $a3, 8
+; LA32-UAL-NEXT:    slli.w $a1, $a1, 24
+; LA32-UAL-NEXT:    or $a1, $a1, $a3
+; LA32-UAL-NEXT:    or $a1, $a1, $a2
+; LA32-UAL-NEXT:    sltu $a0, $a0, $a1
+; LA32-UAL-NEXT:    xori $a0, $a0, 1
+; LA32-UAL-NEXT:    ret
 ;
-; LA64-LABEL: memcmp_ge_zero:
-; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    addi.d $sp, $sp, -16
-; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    ori $a2, $zero, 4
-; LA64-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    addi.w $a1, $zero, -1
-; LA64-NEXT:    slt $a0, $a1, $a0
-; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    addi.d $sp, $sp, 16
-; LA64-NEXT:    ret
+; LA64-UAL-LABEL: memcmp_ge_zero:
+; LA64-UAL:       # %bb.0: # %entry
+; LA64-UAL-NEXT:    ld.w $a0, $a0, 0
+; LA64-UAL-NEXT:    ld.w $a1, $a1, 0
+; LA64-UAL-NEXT:    revb.2w $a0, $a0
+; LA64-UAL-NEXT:    addi.w $a0, $a0, 0
+; LA64-UAL-NEXT:    revb.2w $a1, $a1
+; LA64-UAL-NEXT:    addi.w $a1, $a1, 0
+; LA64-UAL-NEXT:    sltu $a0, $a0, $a1
+; LA64-UAL-NEXT:    xori $a0, $a0, 1
+; LA64-UAL-NEXT:    ret
+;
+; LA32-NUAL-LABEL: memcmp_ge_zero:
+; LA32-NUAL:       # %bb.0: # %entry
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, -16
+; LA32-NUAL-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NUAL-NEXT:    ori $a2, $zero, 4
+; LA32-NUAL-NEXT:    bl memcmp
+; LA32-NUAL-NEXT:    addi.w $a1, $zero, -1
+; LA32-NUAL-NEXT:    slt $a0, $a1, $a0
+; LA32-NUAL-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NUAL-NEXT:    addi.w $sp, $sp, 16
+; LA32-NUAL-NEXT:    ret
+;
+; LA64-NUAL-LABEL: memcmp_ge_zero:
+; LA64-NUAL:       # %bb.0: # %entry
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, -16
+; LA64-NUAL-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NUAL-NEXT:    ori $a2, $zero, 4
+; LA64-NUAL-NEXT:    pcaddu18i $ra, %call36(memcmp)
+; LA64-NUAL-NEXT:    jirl $ra, $ra, 0
+; LA64-NUAL-NEXT:    addi.w $a1, $zero, -1
+; LA64-NUAL-NEXT:    slt $a0, $a1, $a0
+; LA64-NUAL-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NUAL-NEXT:    addi.d $sp, $sp, 16
+; LA64-NUAL-NEXT:    ret
 entry:
   %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iGRLen 4)
   %ret = icmp sgt i32 %memcmp, -1
   ret i1 %ret
 }
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; LA32-NUAL: {{.*}}
-; LA32-UAL: {{.*}}
-; LA64-NUAL: {{.*}}
-; LA64-UAL: {{.*}}
diff --git a/llvm/test/CodeGen/LoongArch/memcmp.ll b/llvm/test/CodeGen/LoongArch/memcmp.ll
index c4aaf9a75a852..c3811c0357793 100644
--- a/llvm/test/CodeGen/LoongArch/memcmp.ll
+++ b/llvm/test/CodeGen/LoongArch/memcmp.ll
@@ -7,15 +7,24 @@
 define signext i32 @test1(ptr %buffer1, ptr %buffer2) {
 ; CHECK-LABEL: test1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi.d $sp, $sp, -16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_offset 1, -8
-; CHECK-NEXT:    ori $a2, $zero, 16
-; CHECK-NEXT:    pcaddu18i $ra, %call36(memcmp)
-; CHECK-NEXT:    jirl $ra, $ra, 0
-; CHECK-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; CHECK-NEXT:    addi.d $sp, $sp, 16
+; CHECK-NEXT:    ld.d $a2, $a0, 0
+; CHECK-NEXT:    ld.d $a3, $a1, 0
+; CHECK-NEXT:    revb.d $a2, $a2
+; CHECK-NEXT:    revb.d $a3, $a3
+; CHECK-NEXT:    bne $a2, $a3, .LBB0_3
+; CHECK-NEXT:  # %bb.1: # %loadbb1
+; CHECK-NEXT:    ld.d $a0, $a0, 8
+; CHECK-NEXT:    ld.d $a1, $a1, 8
+; CHECK-NEXT:    revb.d $a2, $a0
+; CHECK-NEXT:    revb.d $a3, $a1
+; CHECK-NEXT:    bne $a2, $a3, .LBB0_3
+; CHECK-NEXT:  # %bb.2:
+; CHECK-NEXT:    move $a0, $zero
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB0_3: # %res_block
+; CHECK-NEXT:    sltu $a0, $a2, $a3
+; CHECK-NEXT:    sub.d $a0, $zero, $a0
+; CHECK-NEXT:    ori $a0, $a0, 1
 ; CHECK-NEXT:    ret
 entry:
   %call = call signext i32 @memcmp(ptr %buffer1, ptr %buffer2, i64 16)


